Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2015-10-30

1) The flow cache is bounded by two limits: the flow cache
   limit, which scales with the number of cpus, and the xfrm
   garbage collector threshold, which is independent of the
   number of cpus. As a result, on systems with more than 16
   cpus we hit the xfrm garbage collector threshold and refuse
   new allocations, so new flows are dropped. On systems with
   16 or fewer cpus we hit the flow cache limit instead; in
   that case we shrink the flow cache rather than refusing new
   flows.

   We increase the xfrm garbage collector threshold to INT_MAX
   to get the same behaviour, independent of the number of cpus.
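
   As an illustration of the mechanism (a hedged sketch with
   illustrative names, not the literal patch): the garbage
   collector refuses new dst allocations once the threshold is
   exceeded, so raising the threshold leaves the per-cpu flow
   cache limit in charge:

      /* Sketch only -- not the exact upstream code. */
      if (dst_entries_get_slow(ops) > ops->gc_thresh)
              return 1;   /* refuse allocation; the flow is dropped */

      /* With ops->gc_thresh set to INT_MAX this branch is
       * effectively never taken, and the flow cache enforces its
       * own cpu-scaled limit by shrinking instead of refusing.
       */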

2) Fix some unaligned accesses on sparc systems.
   From Sowmini Varadhan.
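
   The usual cure for such warnings is to go through the
   unaligned accessors; a generic sketch (the actual patches may
   differ in detail):

      #include <asm/unaligned.h>

      /* On sparc a misaligned load/store traps, so potentially
       * unaligned fields are accessed via the helpers instead of
       * being dereferenced directly.
       */
      u32 spi = get_unaligned((u32 *)p);      /* not *(u32 *)p */
      put_unaligned(spi, (u32 *)q);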

3) Fix some header checks in _decode_session4. We may pass a
   negative value to pskb_may_pull, where it is implicitly
   converted to a huge unsigned int. This can lead to incorrect
   policy lookups. We fix this by checking the data pointer
   position before we call pskb_may_pull.

4) Reload the skb header pointers after calling pskb_may_pull
   in _decode_session4, as that call may move the packet data
   and thus change the pointers into the packet.
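
   A simplified sketch of the resulting pattern in
   _decode_session4 (abridged; only two protocol cases shown):

      unsigned char *xprth = skb_network_header(skb) + iph->ihl * 4;

      switch (iph->protocol) {
      case IPPROTO_TCP:
      case IPPROTO_UDP:
              /* Item 3: if xprth + 4 lies before skb->data, the
               * subtraction below would wrap to a huge unsigned
               * length, so check the pointer position first.
               */
              if (xprth + 4 < skb->data ||
                  pskb_may_pull(skb, xprth + 4 - skb->data)) {
                      __be16 *ports;

                      /* Item 4: pskb_may_pull may relocate the
                       * packet data, so reload the header pointer
                       * before dereferencing it.
                       */
                      xprth = skb_network_header(skb) + iph->ihl * 4;
                      ports = (__be16 *)xprth;

                      fl4->fl4_sport = ports[!!reverse];
                      fl4->fl4_dport = ports[!reverse];
              }
              break;
      }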

5) Add a missing statistic counter on inner mode errors.
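
   The change is presumably of this shape (a sketch, assuming the
   inner mode lookup failure path in xfrm_input and the
   LINUX_MIB_XFRMINSTATEMODEERROR counter):

      inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
      if (inner_mode == NULL) {
              /* previously dropped without accounting */
              XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
              goto drop;
      }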

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d88630..f447f05 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@
 o  grub                   0.93                    # grub --version || grub-install --version
 o  mcelog                 0.6                     # mcelog --version
 o  iptables               1.4.2                   # iptables -V
-o  openssl & libcrypto    1.0.1k                  # openssl version
+o  openssl & libcrypto    1.0.0                   # openssl version
 
 
 Kernel compilation
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index aac9357..f9b9ad7 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -154,8 +154,9 @@
 !Finclude/net/cfg80211.h cfg80211_scan_request
 !Finclude/net/cfg80211.h cfg80211_scan_done
 !Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width_frame
-!Finclude/net/cfg80211.h cfg80211_inform_bss_width
+!Finclude/net/cfg80211.h cfg80211_inform_bss
+!Finclude/net/cfg80211.h cfg80211_inform_bss_frame_data
+!Finclude/net/cfg80211.h cfg80211_inform_bss_data
 !Finclude/net/cfg80211.h cfg80211_unlink_bss
 !Finclude/net/cfg80211.h cfg80211_find_ie
 !Finclude/net/cfg80211.h ieee80211_bss_get_ie
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index 0d5bc46..ad6949b 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -41,9 +41,13 @@
 the amount of free space and expand the <COW device> before it fills up.
 
 <persistent?> is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot).  O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status.  So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is with transient
+snapshots less metadata must be saved on disk - they can be kept in
+memory by the kernel.
 
 
 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
index 635a3b0..8d91ba9 100644
--- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -25,7 +25,7 @@
 		/* Cypress Gen3 touchpad */
 		touchpad@67 {
 			compatible = "cypress,cyapa";
-			reg = <0x24>;
+			reg = <0x67>;
 			interrupt-parent = <&gpio>;
 			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
 			wakeup-source;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
index 391717a..ec96b1f 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
@@ -4,8 +4,8 @@
 interrupt.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc", "qca,ar7100-misc-intc"
-  as fallback
+- compatible: has to be "qca,<soctype>-misc-intc", "qca,ar7100-misc-intc" or
+  "qca,<soctype>-misc-intc", "qca,ar7240-misc-intc"
 - reg: Base address and size of the controllers memory area
 - interrupt-parent: phandle of the parent interrupt controller.
 - interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@
 - #interrupt-cells : Specifies the number of cells needed to encode interrupt
 		     source, should be 1
 
+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
 Please refer to interrupts.txt in this directory for details of the common
 Interrupt Controllers bindings used by client devices.
 
@@ -28,3 +31,16 @@
 		interrupt-controller;
 		#interrupt-cells = <1>;
 	};
+
+Another example:
+
+	interrupt-controller@18060010 {
+		compatible = "qca,ar9331-misc-intc", "qca,ar7240-misc-intc";
+		reg = <0x18060010 0x4>;
+
+		interrupt-parent = <&cpuintc>;
+		interrupts = <6>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
index f55aa28..078060a 100644
--- a/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
+++ b/Documentation/devicetree/bindings/net/apm-xgene-enet.txt
@@ -37,6 +37,14 @@
 
 Optional properties:
 - status: Should be "ok" or "disabled" for enabled/disabled. Default is "ok".
+- tx-delay: Delay value for RGMII bridge TX clock.
+	    Valid values are between 0 and 7, which map to
+	    417, 717, 1020, 1321, 1611, 1913, 2215, 2514 ps.
+	    Default value is 4, which corresponds to 1611 ps.
+- rx-delay: Delay value for RGMII bridge RX clock.
+	    Valid values are between 0 and 7, which map to
+	    273, 589, 899, 1222, 1480, 1806, 2147, 2464 ps.
+	    Default value is 2, which corresponds to 899 ps.
 
 Example:
 	menetclk: menetclk {
@@ -72,5 +80,7 @@
 
 /* Board-specific peripheral configurations */
 &menet {
+	tx-delay = <4>;
+	rx-delay = <2>;
         status = "ok";
 };
diff --git a/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
new file mode 100644
index 0000000..8ba9ed1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,iproc-mdio.txt
@@ -0,0 +1,23 @@
+* Broadcom iProc MDIO bus controller
+
+Required properties:
+- compatible: should be "brcm,iproc-mdio"
+- reg: address and length of the register set for the MDIO interface
+- #size-cells: must be 1
+- #address-cells: must be 0
+
+Child nodes of this MDIO bus controller node are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+mdio@18002000 {
+	compatible = "brcm,iproc-mdio";
+	reg = <0x18002000 0x8>;
+	#size-cells = <1>;
+	#address-cells = <0>;
+
+	enet-gphy@0 {
+		reg = <0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 676ecf6..4efca56 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -46,6 +46,7 @@
 Optional properties:
 - dual_emac_res_vlan	: Specifies VID to be used to segregate the ports
 - mac-address		: See ethernet.txt file in the same directory
+- phy-handle		: See ethernet.txt file in the same directory
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index 1e97532..db74f0d 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -57,6 +57,10 @@
     "rgmii-id", as all other connection types are detected by hardware.
   - fsl,magic-packet : If present, indicates that the hardware supports
     waking up via magic packet.
+  - fsl,wake-on-filer : If present, indicates that the hardware supports
+    waking up by Filer General Purpose Interrupt (FGPI) asserted on the
+    Rx int line.  This is an advanced power management capability allowing
+    certain packet types (user-defined via filer rules) to wake up the system.
   - bd-stash : If present, indicates that the hardware supports stashing
     buffer descriptors in the L2.
   - rx-stash-len : Denotes the number of bytes of a received buffer to stash
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
index 9940aa0..9c23fdf 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hns-mdio.txt
@@ -12,7 +12,7 @@
          mdio@803c0000 {
                    #address-cells = <1>;
                    #size-cells = <0>;
-                   compatible = "hisilicon,mdio","hisilicon,hns-mdio";
+                   compatible = "hisilicon,hns-mdio","hisilicon,mdio";
                    reg = <0x0 0x803c0000 0x0 0x10000>;
 
                    ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt b/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
new file mode 100644
index 0000000..a4ed2ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
@@ -0,0 +1,20 @@
+* MRF24J40 IEEE 802.15.4 *
+
+Required properties:
+  - compatible:		should be "microchip,mrf24j40", "microchip,mrf24j40ma",
+			or "microchip,mrf24j40mc", depending on your transceiver
+			board
+  - spi-max-frequency:	maximal bus speed, should be set to a value less than
+			or equal to 10000000
+  - reg:		the chipselect index
+  - interrupts:		the interrupt generated by the device.
+
+Example:
+
+	mrf24j40ma@0 {
+		compatible = "microchip,mrf24j40ma";
+		spi-max-frequency = <8500000>;
+		reg = <0>;
+		interrupts = <19 8>;
+		interrupt-parent = <&gpio3>;
+	};
diff --git a/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt
index 7c4a0cc..76df917 100644
--- a/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt
+++ b/Documentation/devicetree/bindings/net/nfc/nfcmrvl.txt
@@ -1,7 +1,10 @@
 * Marvell International Ltd. NCI NFC Controller
 
 Required properties:
-- compatible: Should be "mrvl,nfc-uart".
+- compatible: Should be:
+  - "marvell,nfc-uart" or "mrvl,nfc-uart" for UART devices
+  - "marvell,nfc-i2c" for I2C devices
+  - "marvell,nfc-spi" for SPI devices
 
 Optional SoC specific properties:
 - pinctrl-names: Contains only one value - "default".
@@ -13,13 +16,19 @@
 - flow-control: Specifies that the chip is using RTS/CTS.
 - break-control: Specifies that the chip needs specific break management.
 
+Optional I2C-based chip specific properties:
+- i2c-int-falling: Specifies that the chip read event shall be triggered on
+  		   falling edge.
+- i2c-int-rising: Specifies that the chip read event shall be triggered on
+  		  rising edge.
+
 Example (for ARM-based BeagleBoard Black with 88W8887 on UART5):
 
 &uart5 {
 	status = "okay";
 
 	nfcmrvluart: nfcmrvluart@5 {
-		compatible = "mrvl,nfc-uart";
+		compatible = "marvell,nfc-uart";
 
 		reset-n-io = <&gpio3 16 0>;
 
@@ -27,3 +36,51 @@
 		flow-control;
         }
 };
+
+
+Example (for ARM-based BeagleBoard Black with 88W8887 on I2C1):
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <400000>;
+
+	nfcmrvli2c0: i2c@1 {
+		compatible = "marvell,nfc-i2c";
+
+		reg = <0x8>;
+
+		/* I2C INT configuration */
+		interrupt-parent = <&gpio3>;
+		interrupts = <21 0>;
+
+		/* I2C INT trigger configuration */
+		i2c-int-rising;
+
+		/* Reset IO */
+		reset-n-io = <&gpio3 19 0>;
+	};
+};
+
+
+Example (for ARM-based BeagleBoard Black on SPI0):
+
+&spi0 {
+
+	mrvlnfcspi0: spi@0 {
+		compatible = "marvell,nfc-spi";
+
+		reg = <0>;
+
+		/* SPI Bus configuration */
+		spi-max-frequency = <3000000>;
+		spi-cpha;
+		spi-cpol;
+
+		/* SPI INT configuration */
+		interrupt-parent = <&gpio1>;
+		interrupts = <17 0>;
+
+		/* Reset IO */
+		reset-n-io = <&gpio3 19 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
index d707588..263732e 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
@@ -11,6 +11,10 @@
 Optional SoC Specific Properties:
 - pinctrl-names: Contains only one value - "default".
 - pintctrl-0: Specifies the pin control groups used for this controller.
+- ese-present: Specifies that an ese is physically connected to the nfc
+controller.
+- uicc-present: Specifies that the uicc swp signal can be physically
+connected to the nfc controller.
 
 Example (for ARM-based BeagleBoard xM with ST21NFCB on I2C2):
 
@@ -29,5 +33,8 @@
 		interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
 
 		reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+
+		ese-present;
+		uicc-present;
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
index 525681b..711ca85 100644
--- a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
@@ -2,7 +2,7 @@
 
 Required properties:
 - compatible: Should be "st,st21nfcb-spi"
-- spi-max-frequency: Maximum SPI frequency (<= 10000000).
+- spi-max-frequency: Maximum SPI frequency (<= 4000000).
 - interrupt-parent: phandle for the interrupt gpio controller
 - interrupts: GPIO interrupt to which the chip is connected
 - reset-gpios: Output GPIO pin used to reset the ST21NFCB
@@ -10,6 +10,10 @@
 Optional SoC Specific Properties:
 - pinctrl-names: Contains only one value - "default".
 - pintctrl-0: Specifies the pin control groups used for this controller.
+- ese-present: Specifies that an ese is physically connected to the nfc
+controller.
+- uicc-present: Specifies that the uicc swp signal can be physically
+connected to the nfc controller.
 
 Example (for ARM-based BeagleBoard xM with ST21NFCB on SPI4):
 
@@ -27,5 +31,8 @@
 		interrupts = <2 IRQ_TYPE_EDGE_RISING>;
 
 		reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+
+		ese-present;
+		uicc-present;
 	};
 };
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 1fd8831..b486f3f 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -6,8 +6,12 @@
 Required properties:
 - compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC.
 	      "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC.
+	      "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC.
 - reg: offset and length of (1) the register block and (2) the stream buffer.
-- interrupts: interrupt specifier for the sole interrupt.
+- interrupts: A list of interrupt-specifiers, one for each entry in
+	      interrupt-names.
+	      If interrupt-names is not present, an interrupt specifier
+	      for a single muxed interrupt.
 - phy-mode: see ethernet.txt file in the same directory.
 - phy-handle: see ethernet.txt file in the same directory.
 - #address-cells: number of address cells for the MDIO bus, must be equal to 1.
@@ -18,6 +22,12 @@
 Optional properties:
 - interrupt-parent: the phandle for the interrupt controller that services
 		    interrupts for this device.
+- interrupt-names: A list of interrupt names.
+		   For the R8A7795 SoC this property is mandatory;
+		   it should include one entry per channel, named "ch%u",
+		   where %u is the channel number ranging from 0 to 24.
+		   For other SoCs this property is optional; if present
+		   it should contain "mux" for a single muxed interrupt.
 - pinctrl-names: pin configuration state name ("default").
 - renesas,no-ether-link: boolean, specify when a board does not provide a proper
 			 AVB_LINK signal.
@@ -27,13 +37,46 @@
 Example:
 
 	ethernet@e6800000 {
-		compatible = "renesas,etheravb-r8a7790";
-		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+		compatible = "renesas,etheravb-r8a7795";
+		reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
 		interrupt-parent = <&gic>;
-		interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
-		clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
-		phy-mode = "rmii";
+		interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "ch0", "ch1", "ch2", "ch3",
+				  "ch4", "ch5", "ch6", "ch7",
+				  "ch8", "ch9", "ch10", "ch11",
+				  "ch12", "ch13", "ch14", "ch15",
+				  "ch16", "ch17", "ch18", "ch19",
+				  "ch20", "ch21", "ch22", "ch23",
+				  "ch24";
+		clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>;
+		power-domains = <&cpg_clocks>;
+		phy-mode = "rgmii-id";
 		phy-handle = <&phy0>;
+
 		pinctrl-0 = <&ether_pins>;
 		pinctrl-names = "default";
 		renesas,no-ether-link;
@@ -41,8 +84,20 @@
 		#size-cells = <0>;
 
 		phy0: ethernet-phy@0 {
+			rxc-skew-ps = <900>;
+			rxdv-skew-ps = <0>;
+			rxd0-skew-ps = <0>;
+			rxd1-skew-ps = <0>;
+			rxd2-skew-ps = <0>;
+			rxd3-skew-ps = <0>;
+			txc-skew-ps = <900>;
+			txen-skew-ps = <0>;
+			txd0-skew-ps = <0>;
+			txd1-skew-ps = <0>;
+			txd2-skew-ps = <0>;
+			txd3-skew-ps = <0>;
 			reg = <0>;
 			interrupt-parent = <&gpio2>;
-			interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+			interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
 		};
 	};
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
new file mode 100644
index 0000000..974edd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
@@ -0,0 +1,24 @@
+SMSC LAN87xx Ethernet PHY
+
+Some boards require special tuning values. Configure them
+through an Ethernet OF device node.
+
+Optional properties:
+
+- smsc,disable-energy-detect:
+  If set, do not enable energy detect mode for the SMSC phy.
+  default: enable energy detect mode
+
+Examples:
+SMSC PHY with disabled energy detect mode on an am335x-based board.
+&davinci_mdio {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&davinci_mdio_default>;
+	pinctrl-1 = <&davinci_mdio_sleep>;
+	status = "okay";
+
+	ethernetphy0: ethernet-phy@0 {
+		reg = <0>;
+		smsc,disable-energy-detect;
+	};
+};
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 8f77144..705075d 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -51,7 +51,7 @@
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
 			 (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-			 (default is 64, or 256 on R-Car Gen2)
+			 (default is 64)
 
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index d71ef07..a057b75 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -6,6 +6,7 @@
 	"lsi,zevio-usb"
 	"qcom,ci-hdrc"
 	"chipidea,usb2"
+	"xlnx,zynq-usb-2.20a"
 - reg: base address and length of the registers
 - interrupts: interrupt for the USB controller
 
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 64a4ca6..7d48f63 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -5,6 +5,7 @@
 	- "renesas,usbhs-r8a7790"
 	- "renesas,usbhs-r8a7791"
 	- "renesas,usbhs-r8a7794"
+	- "renesas,usbhs-r8a7795"
   - reg: Base address and length of the register for the USBHS
   - interrupts: Interrupt specifier for the USBHS
   - clocks: A list of phandle + clock specifier pairs
diff --git a/Documentation/filesystems/nfs/nfsroot.txt b/Documentation/filesystems/nfs/nfsroot.txt
index 2d66ed6..bb5ab6d 100644
--- a/Documentation/filesystems/nfs/nfsroot.txt
+++ b/Documentation/filesystems/nfs/nfsroot.txt
@@ -157,6 +157,9 @@
 		  both:        use both BOOTP and RARP but not DHCP
 		               (old option kept for backwards compatibility)
 
+		If dhcp is used, the client identifier can be specified using
+		the following format: "ip=dhcp,client-id-type,client-id-value"
+
                 Default: any
 
   <dns0-ip>	IP address of first nameserver.
diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt
index b85d000..c51f114 100644
--- a/Documentation/input/multi-touch-protocol.txt
+++ b/Documentation/input/multi-touch-protocol.txt
@@ -361,7 +361,7 @@
    ABS_MT_POSITION_X := T_X
    ABS_MT_POSITION_Y := T_Y
    ABS_MT_TOOL_X := C_X
-   ABS_MT_TOOL_X := C_Y
+   ABS_MT_TOOL_Y := C_Y
 
 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations.  One
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 260f30b..05915be 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -384,6 +384,14 @@
 	Defaults are calculated at boot time from amount of available
 	memory.
 
+tcp_min_rtt_wlen - INTEGER
+	The window length of the windowed min filter to track the minimum RTT.
+	A shorter window lets a flow more quickly pick up new (higher)
+	minimum RTT when it is moved to a longer path (e.g., due to traffic
+	engineering). A longer window makes the filter more resistant to RTT
+	inflations such as transient congestion. The unit is seconds.
+	Default: 300
+
 tcp_moderate_rcvbuf - BOOLEAN
 	If set, TCP performs receive buffer auto-tuning, attempting to
 	automatically size the buffer (no greater than tcp_rmem[2]) to
@@ -425,6 +433,15 @@
 	you should think about lowering this value, such sockets
 	may consume significant resources. Cf. tcp_max_orphans.
 
+tcp_recovery - INTEGER
+	This value is a bitmap to enable various experimental loss recovery
+	features.
+
+	RACK: 0x1 enables the RACK loss detection for fast detection of lost
+	      retransmissions and tail drops.
+
+	Default: 0x1
+
 tcp_reordering - INTEGER
 	Initial reordering level of packets in a TCP stream.
 	TCP stack can then dynamically adjust flow reordering level
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index 9f9e2587b..9199413 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -115,7 +115,7 @@
 ^^^^^^^^^
 
 The switchdev driver must implement the switchdev op switchdev_port_attr_get
-for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same
+for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same
 physical ID for each port of a switch.  The ID must be unique between switches
 on the same system.  The ID does not need to be unique between switches on
 different systems.
@@ -178,7 +178,7 @@
 	bridge fdb add ADDR dev DEV [vlan VID] [self]
 
 The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx
-ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using
+ops, and handle add/delete/dump of SWITCHDEV_OBJ_ID_PORT_FDB object using
 switchdev_port_obj_xxx ops.
 
 XXX: what should be done if offloading this rule to hardware fails (for
@@ -233,8 +233,9 @@
 device port and on the bridge port, and disable learning_sync.
 
 To support learning and learning_sync port attributes, the driver implements
-switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS.
-The driver should initialize the attributes to the hardware defaults.
+switchdev op switchdev_port_attr_get/set for
+SWITCHDEV_ATTR_PORT_ID_BRIDGE_FLAGS. The driver should initialize the attributes
+to the hardware defaults.
 
 FDB Ageing
 ^^^^^^^^^^
@@ -260,7 +261,7 @@
 Internally or with a third-party STP protocol implementation (e.g. mstpd), the
 bridge driver maintains the STP state for ports, and will notify the switch
 driver of STP state change on a port using the switchdev op
-switchdev_attr_port_set for SWITCHDEV_ATTR_PORT_STP_UPDATE.
+switchdev_attr_port_set for SWITCHDEV_ATTR_PORT_ID_STP_UPDATE.
 
 State is one of BR_STATE_*.  The switch driver can use STP state updates to
 update ingress packet filter list for the port.  For example, if port is
@@ -277,8 +278,8 @@
 For a given L2 VLAN domain, the switch device should flood multicast/broadcast
 and unknown unicast packets to all ports in domain, if allowed by port's
 current STP state.  The switch driver, knowing which ports are within which
-vlan L2 domain, can program the switch device for flooding.  The packet should
-also be sent to the port netdev for processing by the bridge driver.  The
+vlan L2 domain, can program the switch device for flooding.  The packet may
+be sent to the port netdev for processing by the bridge driver.  The
 bridge should not reflood the packet to the same ports the device flooded,
 otherwise there will be duplicate packets on the wire.
 
@@ -297,6 +298,9 @@
 of ports scale in the L2 domain as the device is much more efficient at
 flooding packets than software.
 
+If supported by the device, flood control can be offloaded to it, preventing
+certain netdevs from flooding unicast traffic for which there is no FDB entry.
+
 IGMP Snooping
 ^^^^^^^^^^^^^
 
@@ -316,9 +320,9 @@
 switchdev_port_obj_add is used for both adding a new FIB entry to the device,
 or modifying an existing entry on the device.
 
-XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported.
+XXX: Currently, only SWITCHDEV_OBJ_ID_IPV4_FIB objects are supported.
 
-SWITCHDEV_OBJ_IPV4_FIB object passes:
+SWITCHDEV_OBJ_ID_IPV4_FIB object passes:
 
 	struct switchdev_obj_ipv4_fib {         /* IPV4_FIB */
 		u32 dst;
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index 031ef4a..d52aa10 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -90,7 +90,304 @@
 
 Limitations
 -----------
-VRF device currently only works for IPv4. Support for IPv6 is under development.
-
 Index of original ingress interface is not available via cmsg. Will address
 soon.
+
+################################################################################
+
+Using iproute2 for VRFs
+=======================
+VRF devices do *not* have to start with 'vrf-'. That is a convention used here
+for emphasis of the device type, similar to use of 'br' in bridge names.
+
+1. Create a VRF
+
+   To instantiate a VRF device and associate it with a table:
+       $ ip link add dev NAME type vrf table ID
+
+   Remember to add the ip rules as well:
+       $ ip ru add oif NAME table 10
+       $ ip ru add iif NAME table 10
+       $ ip -6 ru add oif NAME table 10
+       $ ip -6 ru add iif NAME table 10
+
+   Without the rules, route lookups are not directed to the table.
+
+   For example:
+   $ ip link add dev vrf-blue type vrf table 10
+   $ ip ru add pref 200 oif vrf-blue table 10
+   $ ip ru add pref 200 iif vrf-blue table 10
+   $ ip -6 ru add pref 200 oif vrf-blue table 10
+   $ ip -6 ru add pref 200 iif vrf-blue table 10
+
+
+2. List VRFs
+
+   To list VRFs that have been created:
+       $ ip [-d] link show type vrf
+         NOTE: The -d option is needed to show the table id
+
+   For example:
+   $ ip -d link show type vrf
+   11: vrf-mgmt: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether 72:b3:ba:91:e2:24 brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 1 addrgenmode eui64
+   12: vrf-red: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether b6:6f:6e:f6:da:73 brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 10 addrgenmode eui64
+   13: vrf-blue: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether 36:62:e8:7d:bb:8c brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 66 addrgenmode eui64
+   14: vrf-green: <NOARP,MASTER,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+       link/ether e6:28:b8:63:70:bb brd ff:ff:ff:ff:ff:ff promiscuity 0
+       vrf table 81 addrgenmode eui64
+
+
+   Or in brief output:
+
+   $ ip -br link show type vrf
+   vrf-mgmt         UP             72:b3:ba:91:e2:24 <NOARP,MASTER,UP,LOWER_UP>
+   vrf-red          UP             b6:6f:6e:f6:da:73 <NOARP,MASTER,UP,LOWER_UP>
+   vrf-blue         UP             36:62:e8:7d:bb:8c <NOARP,MASTER,UP,LOWER_UP>
+   vrf-green        UP             e6:28:b8:63:70:bb <NOARP,MASTER,UP,LOWER_UP>
+
+
+3. Assign a Network Interface to a VRF
+
+   Network interfaces are assigned to a VRF by enslaving the netdevice to a
+   VRF device:
+       $ ip link set dev NAME master VRF-NAME
+
+   On enslavement, connected and local routes are automatically moved to the
+   table associated with the VRF device.
+
+   For example:
+   $ ip link set dev eth0 master vrf-mgmt
+
+
+4. Show Devices Assigned to a VRF
+
+   To show devices that have been assigned to a specific VRF add the master
+   option to the ip command:
+       $ ip link show master VRF-NAME
+
+   For example:
+   $ ip link show master vrf-red
+   3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:02 brd ff:ff:ff:ff:ff:ff
+   4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:03 brd ff:ff:ff:ff:ff:ff
+   7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN mode DEFAULT group default qlen 1000
+       link/ether 02:00:00:00:02:06 brd ff:ff:ff:ff:ff:ff
+
+
+   Or using the brief output:
+   $ ip -br link show master vrf-red
+   eth1             UP             02:00:00:00:02:02 <BROADCAST,MULTICAST,UP,LOWER_UP>
+   eth2             UP             02:00:00:00:02:03 <BROADCAST,MULTICAST,UP,LOWER_UP>
+   eth5             DOWN           02:00:00:00:02:06 <BROADCAST,MULTICAST>
+
+
+5. Show Neighbor Entries for a VRF
+
+   To list neighbor entries associated with devices enslaved to a VRF device
+   add the master option to the ip command:
+       $ ip [-6] neigh show master VRF-NAME
+
+   For example:
+   $  ip neigh show master vrf-red
+   10.2.1.254 dev eth1 lladdr a6:d9:c7:4f:06:23 REACHABLE
+   10.2.2.254 dev eth2 lladdr 5e:54:01:6a:ee:80 REACHABLE
+
+    $ ip -6 neigh show master vrf-red
+    2002:1::64 dev eth1 lladdr a6:d9:c7:4f:06:23 REACHABLE
+
+
+6. Show Addresses for a VRF
+
+   To show addresses for interfaces associated with a VRF add the master
+   option to the ip command:
+       $ ip addr show master VRF-NAME
+
+   For example:
+   $ ip addr show master vrf-red
+   3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group default qlen 1000
+       link/ether 02:00:00:00:02:02 brd ff:ff:ff:ff:ff:ff
+       inet 10.2.1.2/24 brd 10.2.1.255 scope global eth1
+          valid_lft forever preferred_lft forever
+       inet6 2002:1::2/120 scope global
+          valid_lft forever preferred_lft forever
+       inet6 fe80::ff:fe00:202/64 scope link
+          valid_lft forever preferred_lft forever
+   4: eth2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master vrf-red state UP group default qlen 1000
+       link/ether 02:00:00:00:02:03 brd ff:ff:ff:ff:ff:ff
+       inet 10.2.2.2/24 brd 10.2.2.255 scope global eth2
+          valid_lft forever preferred_lft forever
+       inet6 2002:2::2/120 scope global
+          valid_lft forever preferred_lft forever
+       inet6 fe80::ff:fe00:203/64 scope link
+          valid_lft forever preferred_lft forever
+   7: eth5: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master vrf-red state DOWN group default qlen 1000
+       link/ether 02:00:00:00:02:06 brd ff:ff:ff:ff:ff:ff
+
+   Or in brief format:
+   $ ip -br addr show master vrf-red
+   eth1             UP             10.2.1.2/24 2002:1::2/120 fe80::ff:fe00:202/64
+   eth2             UP             10.2.2.2/24 2002:2::2/120 fe80::ff:fe00:203/64
+   eth5             DOWN
+
+
+7. Show Routes for a VRF
+
+   To show routes for a VRF use the ip command to display the table associated
+   with the VRF device:
+       $ ip [-6] route show table ID
+
+   For example:
+   $ ip route show table vrf-red
+   prohibit default
+   broadcast 10.2.1.0 dev eth1  proto kernel  scope link  src 10.2.1.2
+   10.2.1.0/24 dev eth1  proto kernel  scope link  src 10.2.1.2
+   local 10.2.1.2 dev eth1  proto kernel  scope host  src 10.2.1.2
+   broadcast 10.2.1.255 dev eth1  proto kernel  scope link  src 10.2.1.2
+   broadcast 10.2.2.0 dev eth2  proto kernel  scope link  src 10.2.2.2
+   10.2.2.0/24 dev eth2  proto kernel  scope link  src 10.2.2.2
+   local 10.2.2.2 dev eth2  proto kernel  scope host  src 10.2.2.2
+   broadcast 10.2.2.255 dev eth2  proto kernel  scope link  src 10.2.2.2
+
+   $ ip -6 route show table vrf-red
+   local 2002:1:: dev lo  proto none  metric 0  pref medium
+   local 2002:1::2 dev lo  proto none  metric 0  pref medium
+   2002:1::/120 dev eth1  proto kernel  metric 256  pref medium
+   local 2002:2:: dev lo  proto none  metric 0  pref medium
+   local 2002:2::2 dev lo  proto none  metric 0  pref medium
+   2002:2::/120 dev eth2  proto kernel  metric 256  pref medium
+   local fe80:: dev lo  proto none  metric 0  pref medium
+   local fe80:: dev lo  proto none  metric 0  pref medium
+   local fe80::ff:fe00:202 dev lo  proto none  metric 0  pref medium
+   local fe80::ff:fe00:203 dev lo  proto none  metric 0  pref medium
+   fe80::/64 dev eth1  proto kernel  metric 256  pref medium
+   fe80::/64 dev eth2  proto kernel  metric 256  pref medium
+   ff00::/8 dev vrf-red  metric 256  pref medium
+   ff00::/8 dev eth1  metric 256  pref medium
+   ff00::/8 dev eth2  metric 256  pref medium
+
+
+8. Route Lookup for a VRF
+
+   A test route lookup can be done for a VRF by adding the oif option to ip:
+       $ ip [-6] route get oif VRF-NAME ADDRESS
+
+   For example:
+   $ ip route get 10.2.1.40 oif vrf-red
+   10.2.1.40 dev eth1  table vrf-red  src 10.2.1.2
+       cache
+
+   $ ip -6 route get 2002:1::32 oif vrf-red
+   2002:1::32 from :: dev eth1  table vrf-red  proto kernel  src 2002:1::2  metric 256  pref medium
+
+
+9. Removing Network Interface from a VRF
+
+   Network interfaces are removed from a VRF by breaking the enslavement to
+   the VRF device:
+       $ ip link set dev NAME nomaster
+
+   Connected routes are moved back to the default table and local entries are
+   moved to the local table.
+
+   For example:
+   $ ip link set dev eth0 nomaster
+
+--------------------------------------------------------------------------------
+
+Commands used in this example:
+
+cat >> /etc/iproute2/rt_tables <<EOF
+1  vrf-mgmt
+10 vrf-red
+66 vrf-blue
+81 vrf-green
+EOF
+
+function vrf_create
+{
+    VRF=$1
+    TBID=$2
+    # create VRF device
+    ip link add vrf-${VRF} type vrf table ${TBID}
+
+    # add rules that direct lookups to vrf table
+    ip ru add pref 200 oif vrf-${VRF} table ${TBID}
+    ip ru add pref 200 iif vrf-${VRF} table ${TBID}
+    ip -6 ru add pref 200 oif vrf-${VRF} table ${TBID}
+    ip -6 ru add pref 200 iif vrf-${VRF} table ${TBID}
+
+    if [ "${VRF}" != "mgmt" ]; then
+        ip route add table ${TBID} prohibit default
+    fi
+    ip link set dev vrf-${VRF} up
+    ip link set dev vrf-${VRF} state up
+}
+
+vrf_create mgmt 1
+ip link set dev eth0 master vrf-mgmt
+
+vrf_create red 10
+ip link set dev eth1 master vrf-red
+ip link set dev eth2 master vrf-red
+ip link set dev eth5 master vrf-red
+
+vrf_create blue 66
+ip link set dev eth3 master vrf-blue
+
+vrf_create green 81
+ip link set dev eth4 master vrf-green
+
+
+Interface addresses from /etc/network/interfaces:
+auto eth0
+iface eth0 inet static
+      address 10.0.0.2
+      netmask 255.255.255.0
+      gateway 10.0.0.254
+
+iface eth0 inet6 static
+      address 2000:1::2
+      netmask 120
+
+auto eth1
+iface eth1 inet static
+      address 10.2.1.2
+      netmask 255.255.255.0
+
+iface eth1 inet6 static
+      address 2002:1::2
+      netmask 120
+
+auto eth2
+iface eth2 inet static
+      address 10.2.2.2
+      netmask 255.255.255.0
+
+iface eth2 inet6 static
+      address 2002:2::2
+      netmask 120
+
+auto eth3
+iface eth3 inet static
+      address 10.2.3.2
+      netmask 255.255.255.0
+
+iface eth3 inet6 static
+      address 2002:3::2
+      netmask 120
+
+auto eth4
+iface eth4 inet static
+      address 10.2.4.2
+      netmask 255.255.255.0
+
+iface eth4 inet6 static
+      address 2002:4::2
+      netmask 120
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 62328d7..b0e911e 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -979,20 +979,45 @@
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).
 
-The runtime PM of PCI devices is disabled by default.  It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function.  If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function.  However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device.  Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core.  PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function.  In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function.  If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended.  The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose.  Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though.  Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time).  [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrement of
+the device's runtime PM usage counter made at probe time.  For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback.  [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
 
 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 2bc8abc..6c6247a 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -18,6 +18,7 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__        /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
diff --git a/MAINTAINERS b/MAINTAINERS
index bcd263d..b6d822d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -615,9 +615,8 @@
 F:	drivers/hwmon/fam15h_power.c
 
 AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
-M:	Thomas Dahlmann <dahlmann.thomas@arcor.de>
 L:	linux-geode@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
+S:	Orphan
 F:	drivers/usb/gadget/udc/amd5536udc.*
 
 AMD GEODE PROCESSOR/CHIPSET SUPPORT
@@ -3401,7 +3400,6 @@
 
 DIGI EPCA PCI PRODUCTS
 M:	Lidza Louina <lidza.louina@gmail.com>
-M:	Mark Hounschell <markh@compro.net>
 M:	Daeseok Youn <daeseok.youn@gmail.com>
 L:	driverdev-devel@linuxdriverproject.org
 S:	Maintained
@@ -3593,6 +3591,13 @@
 F:	include/drm/i915*
 F:	include/uapi/drm/i915*
 
+DRM DRIVERS FOR ATMEL HLCDC
+M:	Boris Brezillon <boris.brezillon@free-electrons.com>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+F:	drivers/gpu/drm/atmel-hlcdc/
+F:	Documentation/devicetree/bindings/drm/atmel/
+
 DRM DRIVERS FOR EXYNOS
 M:	Inki Dae <inki.dae@samsung.com>
 M:	Joonyoung Shim <jy0922.shim@samsung.com>
@@ -3621,6 +3626,14 @@
 F:	drivers/gpu/drm/imx/
 F:	Documentation/devicetree/bindings/drm/imx/
 
+DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+M:	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+L:	dri-devel@lists.freedesktop.org
+T:	git git://github.com/patjak/drm-gma500
+S:	Maintained
+F:	drivers/gpu/drm/gma500
+F:	include/drm/gma500*
+
 DRM DRIVERS FOR NVIDIA TEGRA
 M:	Thierry Reding <thierry.reding@gmail.com>
 M:	Terje Bergström <tbergstrom@nvidia.com>
@@ -4005,7 +4018,7 @@
 F:	sound/usb/misc/ua101.c
 
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
-M:	Matt Fleming <matt.fleming@intel.com>
+M:	Matt Fleming <matt@codeblueprint.co.uk>
 L:	linux-efi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:	Maintained
@@ -4020,7 +4033,7 @@
 EFI VARIABLE FILESYSTEM
 M:	Matthew Garrett <matthew.garrett@nebula.com>
 M:	Jeremy Kerr <jk@ozlabs.org>
-M:	Matt Fleming <matt.fleming@intel.com>
+M:	Matt Fleming <matt@codeblueprint.co.uk>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 L:	linux-efi@vger.kernel.org
 S:	Maintained
@@ -5548,7 +5561,7 @@
 INTEL WIRELESS WIFI LINK (iwlwifi)
 M:	Johannes Berg <johannes.berg@intel.com>
 M:	Emmanuel Grumbach <emmanuel.grumbach@intel.com>
-M:	Intel Linux Wireless <ilw@linux.intel.com>
+M:	Intel Linux Wireless <linuxwifi@intel.com>
 L:	linux-wireless@vger.kernel.org
 W:	http://intellinuxwireless.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
@@ -5959,7 +5972,7 @@
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 S:	Maintained
 F:	arch/x86/include/asm/svm.h
 F:	arch/x86/kvm/svm.c
@@ -5967,7 +5980,7 @@
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:	Alexander Graf <agraf@suse.com>
 L:	kvm-ppc@vger.kernel.org
-W:	http://kvm.qumranet.com
+W:	http://www.linux-kvm.org/
 T:	git git://github.com/agraf/linux-2.6.git
 S:	Supported
 F:	arch/powerpc/include/asm/kvm*
@@ -6095,6 +6108,13 @@
 F:	drivers/auxdisplay/ks0108.c
 F:	include/linux/ks0108.h
 
+L3MDEV
+M:	David Ahern <dsa@cumulusnetworks.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	net/l3mdev
+F:	include/net/l3mdev.h
+
 LAPB module
 L:	linux-x25@vger.kernel.org
 S:	Orphan
@@ -6780,7 +6800,6 @@
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M:	Amir Vadai <amirv@mellanox.com>
-M:	Ido Shamay <idos@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
@@ -6973,6 +6992,7 @@
 L:	linux-wpan@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ieee802154/mrf24j40.c
+F:	Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
 
 MSI LAPTOP SUPPORT
 M:	"Lee, Chun-Yi" <jlee@suse.com>
@@ -8520,6 +8540,16 @@
 S:	Supported
 F:	drivers/net/ethernet/qlogic/qlge/
 
+QLOGIC QL4xxx ETHERNET DRIVER
+M:	Yuval Mintz <Yuval.Mintz@qlogic.com>
+M:	Ariel Elior <Ariel.Elior@qlogic.com>
+M:	everest-linux-l2@qlogic.com
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	drivers/net/ethernet/qlogic/qed/
+F:	include/linux/qed/
+F:	drivers/net/ethernet/qlogic/qede/
+
 QNX4 FILESYSTEM
 M:	Anders Larsen <al@alarsen.net>
 W:	http://www.alarsen.net/linux/qnx4fs/
@@ -8871,6 +8901,13 @@
 F:	drivers/net/wireless/rtlwifi/
 F:	drivers/net/wireless/rtlwifi/rtl8192ce/
 
+RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
+M:	Jes Sorensen <Jes.Sorensen@redhat.com>
+L:	linux-wireless@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8723au-mac80211
+S:	Maintained
+F:	drivers/net/wireless/realtek/rtl8xxxu/
+
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:	Antonino Daplas <adaplas@gmail.com>
 L:	linux-fbdev@vger.kernel.org
@@ -9102,6 +9139,15 @@
 F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 
+SYNOPSYS DESIGNWARE I2C DRIVER
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+M:	Jarkko Nikula <jarkko.nikula@linux.intel.com>
+M:	Mika Westerberg <mika.westerberg@linux.intel.com>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-designware-*
+F:	include/linux/platform_data/i2c-designware.h
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
@@ -9909,13 +9955,12 @@
 STAGING - LUSTRE PARALLEL FILESYSTEM
 M:	Oleg Drokin <oleg.drokin@intel.com>
 M:	Andreas Dilger <andreas.dilger@intel.com>
-L:	HPDD-discuss@lists.01.org (moderated for non-subscribers)
-W:	http://lustre.opensfs.org/
+L:	lustre-devel@lists.lustre.org (moderated for non-subscribers)
+W:	http://wiki.lustre.org/
 S:	Maintained
 F:	drivers/staging/lustre
 
 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
-M:	Julian Andres Klode <jak@jak-linux.org>
 M:	Marc Dietrich <marvin24@gmx.de>
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
 L:	linux-tegra@vger.kernel.org
@@ -11202,7 +11247,7 @@
 F:	include/linux/vlynq.h
 
 VME SUBSYSTEM
-M:	Martyn Welch <martyn.welch@ge.com>
+M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	devel@driverdev.osuosl.org
@@ -11266,7 +11311,6 @@
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/vrf.c
-F:	include/net/vrf.h
 F:	Documentation/networking/vrf.txt
 
 VT1211 HARDWARE MONITOR DRIVER
@@ -11379,15 +11423,6 @@
 S:	Maintained
 F:	drivers/net/wireless/wl3501*
 
-WM97XX TOUCHSCREEN DRIVERS
-M:	Mark Brown <broonie@kernel.org>
-M:	Liam Girdwood <lrg@slimlogic.co.uk>
-L:	linux-input@vger.kernel.org
-W:	https://github.com/CirrusLogic/linux-drivers/wiki
-S:	Supported
-F:	drivers/input/touchscreen/*wm97*
-F:	include/linux/wm97xx.h
-
 WOLFSON MICROELECTRONICS DRIVERS
 L:	patches@opensource.wolfsonmicro.com
 T:	git https://github.com/CirrusLogic/linux-drivers.git
diff --git a/Makefile b/Makefile
index 84f4b31..d33ab74 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Hurr durr I'ma sheep
+EXTRAVERSION = -rc6
+NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
index 6b340d0..902e6ab 100644
--- a/arch/alpha/include/asm/word-at-a-time.h
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -52,4 +52,6 @@
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index 7611b10..0b10ef2 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -48,4 +48,5 @@
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 233159d..bb8fa02 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -578,7 +578,7 @@
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
 	sun4i-a10-inet97fv2.dtb \
-	sun4i-a10-itead-iteaduino-plus.dts \
+	sun4i-a10-itead-iteaduino-plus.dtb \
 	sun4i-a10-jesurun-q5.dtb \
 	sun4i-a10-marsboard.dtb \
 	sun4i-a10-mini-xplus.dtb \
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 4d28fc3..5dd084f 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -252,10 +252,10 @@
 		};
 
 		vdd1_reg: regulator@2 {
-			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			/* VDD_MPU voltage limits 0.95V - 1.325V with +/-4% tolerance */
 			regulator-name = "vdd_mpu";
 			regulator-min-microvolt = <912500>;
-			regulator-max-microvolt = <1312500>;
+			regulator-max-microvolt = <1378000>;
 			regulator-boot-on;
 			regulator-always-on;
 		};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 3a05b94..568adf5 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -98,13 +98,6 @@
 		pinctrl-0 = <&extcon_usb1_pins>;
 	};
 
-	extcon_usb2: extcon_usb2 {
-		compatible = "linux,extcon-usb-gpio";
-		id-gpio = <&gpio7 24 GPIO_ACTIVE_HIGH>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&extcon_usb2_pins>;
-	};
-
 	hdmi0: connector {
 		compatible = "hdmi-connector";
 		label = "hdmi";
@@ -326,12 +319,6 @@
 		>;
 	};
 
-	extcon_usb2_pins: extcon_usb2_pins {
-		pinctrl-single,pins = <
-			0x3e8 (PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_ctsn.gpio7_24 */
-		>;
-	};
-
 	tpd12s015_pins: pinmux_tpd12s015_pins {
 		pinctrl-single,pins = <
 			0x3b0 (PIN_OUTPUT | MUX_MODE14)		/* gpio7_10 CT_CP_HPD */
@@ -432,7 +419,7 @@
 				};
 
 				ldo3_reg: ldo3 {
-					/* VDDA_1V8_PHY */
+					/* VDDA_1V8_PHYA */
 					regulator-name = "ldo3";
 					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <1800000>;
@@ -440,6 +427,15 @@
 					regulator-boot-on;
 				};
 
+				ldo4_reg: ldo4 {
+					/* VDDA_1V8_PHYB */
+					regulator-name = "ldo4";
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					regulator-always-on;
+					regulator-boot-on;
+				};
+
 				ldo9_reg: ldo9 {
 					/* VDD_RTC */
 					regulator-name = "ldo9";
@@ -495,6 +491,14 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 		};
+
+		extcon_usb2: tps659038_usb {
+			compatible = "ti,palmas-usb-vid";
+			ti,enable-vbus-detection;
+			ti,enable-id-detection;
+			id-gpios = <&gpio7 24 GPIO_ACTIVE_HIGH>;
+		};
+
 	};
 
 	tmp102: tmp102@48 {
@@ -517,7 +521,8 @@
 	mcp_rtc: rtc@6f {
 		compatible = "microchip,mcp7941x";
 		reg = <0x6f>;
-		interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;  /* IRQ_SYS_1N */
+		interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+				      <&dra7_pmx_core 0x424>;
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
@@ -579,7 +584,6 @@
 	pinctrl-0 = <&mmc1_pins_default>;
 
 	vmmc-supply = <&ldo1_reg>;
-	vmmc_aux-supply = <&vdd_3v3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
 };
@@ -623,6 +627,14 @@
 };
 
 &usb2 {
+	/*
+	 * Stand alone usage is peripheral only.
+	 * However, with some resistor modifications
+	 * this port can be used via expansion connectors
+	 * as "host" or "dual-role". If so, provide
+	 * the necessary dr_mode override in the expansion
+	 * board's DT.
+	 */
 	dr_mode = "peripheral";
 };
 
@@ -681,7 +693,7 @@
 
 &hdmi {
 	status = "ok";
-	vdda-supply = <&ldo3_reg>;
+	vdda-supply = <&ldo4_reg>;
 
 	pinctrl-names = "default";
 	pinctrl-0 = <&hdmi_pins>;
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
index 92bacd3..109fd47 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
index 8c4bbc7..79838dd 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -8,7 +8,7 @@
 #include "dm814x.dtsi"
 
 / {
-	model = "DM8148 EVM";
+	model = "HP t410 Smart Zero Client";
 	compatible = "hp,t410", "ti,dm8148";
 
 	memory {
@@ -19,10 +19,10 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
-	phy-mode = "mii";
+	phy-mode = "rgmii";
 };
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
index 972c9c9..7988b42 100644
--- a/arch/arm/boot/dts/dm814x.dtsi
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -181,9 +181,9 @@
 				ti,hwmods = "timer3";
 			};
 
-			control: control@160000 {
+			control: control@140000 {
 				compatible = "ti,dm814-scm", "simple-bus";
-				reg = <0x160000 0x16d000>;
+				reg = <0x140000 0x16d000>;
 				#address-cells = <1>;
 				#size-cells = <1>;
 				ranges = <0 0x160000 0x16d000>;
@@ -321,9 +321,9 @@
 				mac-address = [ 00 00 00 00 00 00 ];
 			};
 
-			phy_sel: cpsw-phy-sel@0x48160650 {
+			phy_sel: cpsw-phy-sel@48140650 {
 				compatible = "ti,am3352-cpsw-phy-sel";
-				reg= <0x48160650 0x4>;
+				reg= <0x48140650 0x4>;
 				reg-names = "gmii-sel";
 			};
 		};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 76c739d..8fedddc 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -120,9 +120,10 @@
 					reg = <0x0 0x1400>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x0 0x1400>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-dra7", "ti,pbias-omap";
 						reg = <0xe00 0x4>;
 						syscon = <&scm_conf>;
 						pbias_mmc_reg: pbias_mmc_omap5 {
@@ -1417,7 +1418,7 @@
 			ti,irqs-safe-map = <0>;
 		};
 
-		mac: ethernet@4a100000 {
+		mac: ethernet@48484000 {
 			compatible = "ti,dra7-cpsw","ti,cpsw";
 			ti,hwmods = "gmac";
 			clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index ca0e3c1..294cfe4 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -98,6 +98,7 @@
 			opp-hz = /bits/ 64 <800000000>;
 			opp-microvolt = <1000000>;
 			clock-latency-ns = <200000>;
+			opp-suspend;
 		};
 		opp07 {
 			opp-hz = /bits/ 64 <900000000>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 15aea76..c625e71 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -197,6 +197,7 @@
 				regulator-name = "P1.8V_LDO_OUT10";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 			};
 
 			ldo11_reg: LDO11 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index df9aee9..1b3d6c7 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1117,7 +1117,7 @@
 		interrupt-parent = <&combiner>;
 		interrupts = <3 0>;
 		clock-names = "sysmmu", "master";
-		clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+		clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
 		power-domains = <&disp_pd>;
 		#iommu-cells = <0>;
 	};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 79ffdfe..3b43e57 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -472,7 +472,6 @@
 	 */
 	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
 	pinctrl-names = "default";
-	samsung,pwm-outputs = <0>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 66e47de..96d7eed 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -36,7 +36,7 @@
 		pinctrl-0 = <&pinctrl_pmic>;
 		reg = <0x08>;
 		interrupt-parent = <&gpio5>;
-		interrupts = <23 0x8>;
+		interrupts = <23 IRQ_TYPE_LEVEL_HIGH>;
 		regulators {
 			sw1_reg: sw1a {
 				regulator-name = "SW1";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index c3e3ca9..cd17037 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/imx5-clock.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
 	aliases {
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 3373fd9..a503562 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
 			compatible = "regulator-fixed";
 			reg = <1>;
 			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbh1>;
 			regulator-name = "usbh1_vbus";
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
 			compatible = "regulator-fixed";
 			reg = <2>;
 			pinctrl-names = "default";
-			pinctrl-0 = <&pinctrl_usbotg>;
 			regulator-name = "usb_otg_vbus";
 			regulator-min-microvolt = <5000000>;
 			regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 2390f38..798dda0 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -56,6 +56,7 @@
 					reg = <0x270 0x240>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x270 0x240>;
 
 					scm_clocks: clocks {
 						#address-cells = <1>;
@@ -63,7 +64,7 @@
 					};
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap2", "ti,pbias-omap";
 						reg = <0x230 0x4>;
 						syscon = <&scm_conf>;
 						pbias_mmc_reg: pbias_mmc_omap2430 {
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a547411..67659a0 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@
 
 	tfp410_pins: pinmux_tfp410_pins {
 		pinctrl-single,pins = <
-			0x194 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
+			0x196 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index d5e5cd4..2230e1c 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -78,12 +78,6 @@
 		>;
 	};
 
-	smsc9221_pins: pinmux_smsc9221_pins {
-		pinctrl-single,pins = <
-			0x1a2 (PIN_INPUT | MUX_MODE4)		/* mcspi1_cs2.gpio_176 */
-		>;
-	};
-
 	i2c1_pins: pinmux_i2c1_pins {
 		pinctrl-single,pins = <
 			0x18a (PIN_INPUT | MUX_MODE0)   /* i2c1_scl.i2c1_scl */
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
index e458c21..5ad688c 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
@@ -156,6 +156,12 @@
 			OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)	/* uart2_rx.uart2_rx */
 		>;
 	};
+
+	smsc9221_pins: pinmux_smsc9221_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21d2, PIN_INPUT | MUX_MODE4)	/* mcspi1_cs2.gpio_176 */
+		>;
+	};
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 69a40cf..8a2b253 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -113,10 +113,22 @@
 				};
 
 				scm_conf: scm_conf@270 {
-					compatible = "syscon";
+					compatible = "syscon", "simple-bus";
 					reg = <0x270 0x330>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x270 0x330>;
+
+					pbias_regulator: pbias_regulator {
+						compatible = "ti,pbias-omap3", "ti,pbias-omap";
+						reg = <0x2b0 0x4>;
+						syscon = <&scm_conf>;
+						pbias_mmc_reg: pbias_mmc_omap2430 {
+							regulator-name = "pbias_mmc_omap2430";
+							regulator-min-microvolt = <1800000>;
+							regulator-max-microvolt = <3000000>;
+						};
+					};
 
 					scm_clocks: clocks {
 						#address-cells = <1>;
@@ -202,17 +214,6 @@
 			dma-requests = <96>;
 		};
 
-		pbias_regulator: pbias_regulator {
-			compatible = "ti,pbias-omap";
-			reg = <0x2b0 0x4>;
-			syscon = <&scm_conf>;
-			pbias_mmc_reg: pbias_mmc_omap2430 {
-				regulator-name = "pbias_mmc_omap2430";
-				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <3000000>;
-			};
-		};
-
 		gpio1: gpio@48310000 {
 			compatible = "ti,omap3-gpio";
 			reg = <0x48310000 0x200>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index abc4473..5a206c1 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -196,9 +196,10 @@
 					reg = <0x5a0 0x170>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x5a0 0x170>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap4", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap4_padconf_global>;
 						pbias_mmc_reg: pbias_mmc_omap4 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 3cc8f35..3cb030f 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@
 
 	i2c5_pins: pinmux_i2c5_pins {
 		pinctrl-single,pins = <
-			0x184 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
-			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
+			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
+			0x188 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
 		>;
 	};
 
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 4205a8a..4c04389 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -185,9 +185,10 @@
 					reg = <0x5a0 0xec>;
 					#address-cells = <1>;
 					#size-cells = <1>;
+					ranges = <0 0x5a0 0xec>;
 
 					pbias_regulator: pbias_regulator {
-						compatible = "ti,pbias-omap";
+						compatible = "ti,pbias-omap5", "ti,pbias-omap";
 						reg = <0x60 0x4>;
 						syscon = <&omap5_padconf_global>;
 						pbias_mmc_reg: pbias_mmc_omap5 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index a0b2a79..4624d0f 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -1627,6 +1627,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 831525d..1666c8a 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1677,6 +1677,7 @@
 				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
+		power-domains = <&cpg_clocks>;
 
 		status = "disabled";
 
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2fa7a0d..275c78c 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -158,6 +158,7 @@
 };
 
 &hdmi {
+	ddc-i2c-bus = <&i2c5>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 3efa3b2..6b914e4 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -103,48 +103,46 @@
 							 <&clk_s_d0_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>;
-				ranges;
+			};
 
-				sti-hdmi@8d04000 {
-					compatible = "st,stih407-hdmi";
-					reg = <0x8d04000 0x1000>;
-					reg-names = "hdmi-reg";
-					interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
-					interrupt-names	= "irq";
-					clock-names = "pix",
-						      "tmds",
-						      "phy",
-						      "audio",
-						      "main_parent",
-						      "aux_parent";
+			sti-hdmi@8d04000 {
+				compatible = "st,stih407-hdmi";
+				reg = <0x8d04000 0x1000>;
+				reg-names = "hdmi-reg";
+				interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+				interrupt-names	= "irq";
+				clock-names = "pix",
+					      "tmds",
+					      "phy",
+					      "audio",
+					      "main_parent",
+					      "aux_parent";
 
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
-						 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
-						 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
-						 <&clk_s_d0_flexgen CLK_PCM_0>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+					 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+					 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+					 <&clk_s_d0_flexgen CLK_PCM_0>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 
-					hdmi,hpd-gpio = <&pio5 3>;
-					reset-names = "hdmi";
-					resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
-					ddc = <&hdmiddc>;
+				hdmi,hpd-gpio = <&pio5 3>;
+				reset-names = "hdmi";
+				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+				ddc = <&hdmiddc>;
+			};
 
-				};
-
-				sti-hda@8d02000 {
-					compatible = "st,stih407-hda";
-					reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
-					reg-names = "hda-reg", "video-dacs-ctrl";
-					clock-names = "pix",
-						      "hddac",
-						      "main_parent",
-						      "aux_parent";
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
-						 <&clk_s_d2_flexgen CLK_HDDAC>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
-				};
+			sti-hda@8d02000 {
+				compatible = "st,stih407-hda";
+				reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+				reg-names = "hda-reg", "video-dacs-ctrl";
+				clock-names = "pix",
+					      "hddac",
+					      "main_parent",
+					      "aux_parent";
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+					 <&clk_s_d2_flexgen CLK_HDDAC>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 6f40bc9..8c6e61a 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -178,48 +178,46 @@
 							 <&clk_s_d0_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>,
 							 <&clk_s_d2_quadfs 0>;
-				ranges;
+			};
 
-				sti-hdmi@8d04000 {
-					compatible = "st,stih407-hdmi";
-					reg = <0x8d04000 0x1000>;
-					reg-names = "hdmi-reg";
-					interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
-					interrupt-names	= "irq";
-					clock-names = "pix",
-						      "tmds",
-						      "phy",
-						      "audio",
-						      "main_parent",
-						      "aux_parent";
+			sti-hdmi@8d04000 {
+				compatible = "st,stih407-hdmi";
+				reg = <0x8d04000 0x1000>;
+				reg-names = "hdmi-reg";
+				interrupts = <GIC_SPI 106 IRQ_TYPE_NONE>;
+				interrupt-names	= "irq";
+				clock-names = "pix",
+					      "tmds",
+					      "phy",
+					      "audio",
+					      "main_parent",
+					      "aux_parent";
 
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
-						 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
-						 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
-						 <&clk_s_d0_flexgen CLK_PCM_0>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>,
+					 <&clk_s_d2_flexgen CLK_TMDS_HDMI>,
+					 <&clk_s_d2_flexgen CLK_REF_HDMIPHY>,
+					 <&clk_s_d0_flexgen CLK_PCM_0>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 
-					hdmi,hpd-gpio = <&pio5 3>;
-					reset-names = "hdmi";
-					resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
-					ddc = <&hdmiddc>;
+				hdmi,hpd-gpio = <&pio5 3>;
+				reset-names = "hdmi";
+				resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>;
+				ddc = <&hdmiddc>;
+			};
 
-				};
-
-				sti-hda@8d02000 {
-					compatible = "st,stih407-hda";
-					reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
-					reg-names = "hda-reg", "video-dacs-ctrl";
-					clock-names = "pix",
-						      "hddac",
-						      "main_parent",
-						      "aux_parent";
-					clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
-						 <&clk_s_d2_flexgen CLK_HDDAC>,
-						 <&clk_s_d2_quadfs 0>,
-						 <&clk_s_d2_quadfs 1>;
-				};
+			sti-hda@8d02000 {
+				compatible = "st,stih407-hda";
+				reg = <0x8d02000 0x400>, <0x92b0120 0x4>;
+				reg-names = "hda-reg", "video-dacs-ctrl";
+				clock-names = "pix",
+					      "hddac",
+					      "main_parent",
+					      "aux_parent";
+				clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>,
+					 <&clk_s_d2_flexgen CLK_HDDAC>,
+					 <&clk_s_d2_quadfs 0>,
+					 <&clk_s_d2_quadfs 1>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 2bebaa2..391230c 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -107,7 +107,7 @@
 				720000	1200000
 				528000	1100000
 				312000	1000000
-				144000	900000
+				144000	1000000
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 50c84e1..3f15a5c 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -240,7 +240,8 @@
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_DEBUG_GPIO=y
 CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_PCF857X=m
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCF857X=y
 CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
 CONFIG_W1=m
@@ -350,6 +351,8 @@
 CONFIG_USB_MUSB_OMAP2PLUS=m
 CONFIG_USB_MUSB_AM35X=m
 CONFIG_USB_MUSB_DSPS=m
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_DWC3=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 32640c4..7cba573 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (388)
+#define __NR_syscalls  (392)
 
 /*
  * *NOTE*: This is a ghost syscall private to the kernel.  Only the
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 0c3f5a0..7a2a32a1 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -414,6 +414,8 @@
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
 #define __NR_bpf			(__NR_SYSCALL_BASE+386)
 #define __NR_execveat			(__NR_SYSCALL_BASE+387)
+#define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
+#define __NR_membarrier			(__NR_SYSCALL_BASE+389)
 
 /*
  * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 05745eb..fde6c88 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -397,6 +397,8 @@
 /* 385 */	CALL(sys_memfd_create)
 		CALL(sys_bpf)
 		CALL(sys_execveat)
+		CALL(sys_userfaultfd)
+		CALL(sys_membarrier)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
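
A side note on the padding above: the .equ rounds the syscall table up to a multiple of four entries, which is why __NR_syscalls moves from 388 to 392 even though only two syscalls were added. A minimal userspace sketch of the arithmetic (illustrative only; 390 is __NR_membarrier + 1):

#include <assert.h>

int main(void)
{
	int nr_syscalls = 390;			/* __NR_membarrier + 1 */
	int padded = (nr_syscalls + 3) & ~3;	/* same rounding as the .equ */

	assert(padded == 392);			/* matches __NR_syscalls */
	return 0;
}
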
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf547..5697819 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
+#include <asm/smp_plat.h>
 
 #include "regs-pmu.h"
 #include "common.h"
@@ -70,7 +71,31 @@
 		cluster >= EXYNOS5420_NR_CLUSTERS)
 		return -EINVAL;
 
-	exynos_cpu_power_up(cpunr);
+	if (!exynos_cpu_power_state(cpunr)) {
+		exynos_cpu_power_up(cpunr);
+
+		/*
+		 * This assumes that the cluster number of the big cores
+		 * (Cortex-A15) is 0 and that of the LITTLE cores (Cortex-A7)
+		 * is 1. When the system was booted from a LITTLE core, the
+		 * LITTLE cores must be reset during CPU power-up.
+		 */
+		if (cluster &&
+		    cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+			/*
+			 * Before we reset the LITTLE cores, wait until the
+			 * SPARE2 register reads 1: the iROM init code sets
+			 * it once its initialization is complete.
+			 */
+			while (!pmu_raw_readl(S5P_PMU_SPARE2))
+				udelay(10);
+
+			pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+					EXYNOS_SWRESET);
+		}
+	}
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b761433..fba9068 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@
 #define SPREAD_ENABLE						0xF
 #define SPREAD_USE_STANDWFI					0xF
 
+#define EXYNOS5420_KFC_CORE_RESET0				BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0				BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr)				\
+	((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
 #define EXYNOS5420_BB_CON1					0x0784
 #define EXYNOS5420_BB_SEL_EN					BIT(31)
 #define EXYNOS5420_BB_PMOS_EN					BIT(7)
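
For clarity on the new macro: EXYNOS5420_KFC_CORE_RESET(n) ORs the core and ETM reset bits and shifts both by the core number, so core n gets bit (8 + n) and bit (20 + n). A small sketch with assumed core numbers (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t reset0 = ((1u << 8) | (1u << 20)) << 0;	/* core 0 */
	uint32_t reset1 = ((1u << 8) | (1u << 20)) << 1;	/* core 1 */

	assert(reset0 == 0x00100100);
	assert(reset1 == 0x00200200);
	return 0;
}
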
diff --git a/arch/arm/mach-gemini/board-nas4220b.c b/arch/arm/mach-gemini/board-nas4220b.c
index ca8a25b..18b1279 100644
--- a/arch/arm/mach-gemini/board-nas4220b.c
+++ b/arch/arm/mach-gemini/board-nas4220b.c
@@ -18,7 +18,6 @@
 #include <linux/leds.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/io.h>
 
 #include <asm/setup.h>
diff --git a/arch/arm/mach-gemini/board-wbd111.c b/arch/arm/mach-gemini/board-wbd111.c
index 418188c..14c56f3 100644
--- a/arch/arm/mach-gemini/board-wbd111.c
+++ b/arch/arm/mach-gemini/board-wbd111.c
@@ -15,7 +15,6 @@
 #include <linux/input.h>
 #include <linux/skbuff.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <asm/mach-types.h>
diff --git a/arch/arm/mach-gemini/board-wbd222.c b/arch/arm/mach-gemini/board-wbd222.c
index 266b265..6070282 100644
--- a/arch/arm/mach-gemini/board-wbd222.c
+++ b/arch/arm/mach-gemini/board-wbd222.c
@@ -15,7 +15,6 @@
 #include <linux/input.h>
 #include <linux/skbuff.h>
 #include <linux/gpio_keys.h>
-#include <linux/mdio-gpio.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <asm/mach-types.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 07d2e10..b3a0dff 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,10 +44,11 @@
 	select ARM_CPU_SUSPEND if PM
 	select ARM_GIC
 	select HAVE_ARM_SCU if SMP
-	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
+	select PM_OPP if PM
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -70,10 +71,13 @@
 	select ARCH_OMAP2PLUS
 	select ARM_CPU_SUSPEND if PM
 	select ARM_GIC
+	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT
 	select OMAP_INTERCONNECT_BARRIER
+	select PM_OPP if PM
 
 config ARCH_OMAP2PLUS
 	bool
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 24c9afc..6133eaa 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -20,13 +20,6 @@
 
 #include "common.h"
 
-#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
-#define intc_of_init	NULL
-#endif
-#ifndef CONFIG_ARCH_OMAP4
-#define gic_of_init		NULL
-#endif
-
 static const struct of_device_id omap_dt_match_table[] __initconst = {
 	{ .compatible = "simple-bus", },
 	{ .compatible = "ti,omap-infra", },
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index e3f713f..54a5ba5 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -653,8 +653,12 @@
 			omap_revision = DRA752_REV_ES1_0;
 			break;
 		case 1:
-		default:
 			omap_revision = DRA752_REV_ES1_1;
+			break;
+		case 2:
+		default:
+			omap_revision = DRA752_REV_ES2_0;
+			break;
 		}
 		break;
 
@@ -674,7 +678,7 @@
 		/* Unknown default to latest silicon rev as default*/
 		pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
 			__func__, idcode, hawkeye, rev);
-		omap_revision = DRA752_REV_ES1_1;
+		omap_revision = DRA752_REV_ES2_0;
 	}
 
 	sprintf(soc_name, "DRA%03x", omap_rev() >> 16);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 980c937..3eaeaca 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -676,6 +676,7 @@
 void __init am43xx_init_late(void)
 {
 	omap_common_late_init();
+	omap2_clk_enable_autoidle_all();
 }
 #endif
 
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 4cb8fd9..72ebc4c 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -901,7 +901,8 @@
 		if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
 			return 0;
 
-	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
+	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER &&
+	    od->_driver_status != BUS_NOTIFY_BIND_DRIVER) {
 		if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
 			dev_warn(dev, "%s: enabled but no driver.  Idling\n",
 				 __func__);
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 425bfcd..b668719 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -103,7 +103,8 @@
 #define PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD	(1 << 0)
 #define PM_OMAP4_CPU_OSWR_DISABLE		(1 << 1)
 
-#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+#if defined(CONFIG_PM) && (defined(CONFIG_ARCH_OMAP4) ||\
+	   defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX))
 extern u16 pm44xx_errata;
 #define IS_PM44XX_ERRATUM(id)		(pm44xx_errata & (id))
 #else
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index f97654d..2d1d384 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -469,6 +469,8 @@
 #define DRA7XX_CLASS		0x07000000
 #define DRA752_REV_ES1_0	(DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1	(DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA752_REV_ES2_0	(DRA7XX_CLASS | (0x52 << 16) | (0x20 << 8))
 #define DRA722_REV_ES1_0	(DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 
 void omap2xxx_check_revision(void);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index e4d8701..a556551 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -297,12 +297,8 @@
 	if (IS_ERR(src))
 		return PTR_ERR(src);
 
-	r = clk_set_parent(timer->fclk, src);
-	if (r < 0) {
-		pr_warn("%s: %s cannot set source\n", __func__, oh->name);
-		clk_put(src);
-		return r;
-	}
+	WARN(clk_set_parent(timer->fclk, src) < 0,
+	     "Cannot set timer parent clock, no PLL clock driver?");
 
 	clk_put(src);
 
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index e5a35f6..d44d311 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -300,7 +300,7 @@
 
 	val = voltdm->read(OMAP3_PRM_POLCTRL_OFFSET);
 	if (!(val & OMAP3430_PRM_POLCTRL_CLKREQ_POL) ||
-	    (val & OMAP3430_PRM_POLCTRL_CLKREQ_POL)) {
+	    (val & OMAP3430_PRM_POLCTRL_OFFMODE_POL)) {
 		val |= OMAP3430_PRM_POLCTRL_CLKREQ_POL;
 		val &= ~OMAP3430_PRM_POLCTRL_OFFMODE_POL;
 		pr_debug("PM: fixing sys_clkreq and sys_off_mode polarity to 0x%x\n",
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index a3ebb51..a727282 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -502,7 +502,7 @@
 					balloon3_irq_enabled;
 	do {
 		struct irq_data *d = irq_desc_get_irq_data(desc);
-		struct irq_chip *chip = irq_data_get_chip(d);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
 		unsigned int irq;
 
 		/* clear useless edge notification */
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index d28fe29..07b93fd 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -44,6 +44,13 @@
  */
 
 /*
+ * DFI Bus for NAND, PXA3xx only
+ */
+#define NAND_PHYS		0x43100000
+#define NAND_VIRT		IOMEM(0xf6300000)
+#define NAND_SIZE		0x00100000
+
+/*
  * Internal Memory Controller (PXA27x and later)
  */
 #define IMEMC_PHYS		0x58000000
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index ce0f8d6..06005d3 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -47,6 +47,13 @@
 #define ISRAM_START	0x5c000000
 #define ISRAM_SIZE	SZ_256K
 
+/*
+ * NAND NFC: DFI bus arbitration subset
+ */
+#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
+#define NDCR_ND_ARB_EN		(1 << 12)
+#define NDCR_ND_ARB_CNTL	(1 << 19)
+
 static void __iomem *sram;
 static unsigned long wakeup_src;
 
@@ -362,7 +369,12 @@
 		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
 		.length		= SMEMC_SIZE,
 		.type		= MT_DEVICE
-	}
+	}, {
+		.virtual	= (unsigned long)NAND_VIRT,
+		.pfn		= __phys_to_pfn(NAND_PHYS),
+		.length		= NAND_SIZE,
+		.type		= MT_DEVICE
+	},
 };
 
 void __init pxa3xx_map_io(void)
@@ -419,6 +431,13 @@
 		 */
 		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
 
+		/*
+		 * Disable DFI bus arbitration, to prevent a system bus lock if
+		 * somebody disables the NAND clock (unused clock) while this
+		 * bit remains set.
+		 */
+		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
+
 		if ((ret = pxa_init_dma(IRQ_DMA, 32)))
 			return ret;
 
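
The NDCR update above is a single read-modify-write that clears the arbitration-enable bit while setting the arbitration-control bit. A hedged userspace sketch of the bit arithmetic (the register value is assumed, not read from hardware):

#include <assert.h>
#include <stdint.h>

#define ND_ARB_EN	(1u << 12)
#define ND_ARB_CNTL	(1u << 19)

int main(void)
{
	uint32_t ndcr = 0xffffffffu;		/* assumed initial value */

	ndcr = (ndcr & ~ND_ARB_EN) | ND_ARB_CNTL;
	assert(!(ndcr & ND_ARB_EN));		/* arbitration disabled */
	assert(ndcr & ND_ARB_CNTL);
	return 0;
}
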
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9769f1e..00b7f7d 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -365,15 +365,21 @@
  user:
 	if (LDST_L_BIT(instr)) {
 		unsigned long val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get16t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 
 		/* signed half-word? */
 		if (instr & 0x40)
 			val = (signed long)((signed short) val);
 
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put16t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 
 	return TYPE_LDST;
 
@@ -420,14 +426,21 @@
 
  user:
 	if (load) {
-		unsigned long val;
+		unsigned long val, val2;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get32t_unaligned_check(val, addr);
+		get32t_unaligned_check(val2, addr + 4);
+
+		uaccess_restore(__ua_flags);
+
 		regs->uregs[rd] = val;
-		get32t_unaligned_check(val, addr + 4);
-		regs->uregs[rd2] = val;
+		regs->uregs[rd2] = val2;
 	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
 		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
+		uaccess_restore(__ua_flags);
 	}
 
 	return TYPE_LDST;
@@ -458,10 +471,15 @@
  trans:
 	if (LDST_L_BIT(instr)) {
 		unsigned int val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		get32t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 	return TYPE_LDST;
 
  fault:
@@ -531,6 +549,7 @@
 #endif
 
 	if (user_mode(regs)) {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
 			if (regbits & 1) {
@@ -542,6 +561,7 @@
 					put32t_unaligned_check(regs->uregs[rd], eaddr);
 				eaddr += 4;
 			}
+		uaccess_restore(__ua_flags);
 	} else {
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
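
Every hunk in this file follows the same bracketing pattern: open a user-access window, perform the possibly-faulting unaligned access, then restore the previous state before using the result. A condensed kernel-context sketch (uaccess_save_and_enable()/uaccess_restore() are the helpers used above; do_unaligned_access() is a hypothetical stand-in for the get/put*_unaligned_check() macros):

static int fixup_one_access(unsigned long addr)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	int ret = do_unaligned_access(addr);	/* may fault on user memory */

	uaccess_restore(__ua_flags);
	return ret;
}
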
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 876060b..2f4b14c 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -125,7 +125,7 @@
 }
 
 /*
- * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
+ * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
  * (where the assembly routines like __aeabi_uidiv could cause problems).
  */
 static u32 jit_udiv(u32 dividend, u32 divisor)
@@ -133,6 +133,11 @@
 	return dividend / divisor;
 }
 
+static u32 jit_mod(u32 dividend, u32 divisor)
+{
+	return dividend % divisor;
+}
+
 static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
 {
 	inst |= (cond << 28);
@@ -471,11 +476,17 @@
 #endif
 }
 
-static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
+static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
+				int bpf_op)
 {
 #if __LINUX_ARM_ARCH__ == 7
 	if (elf_hwcap & HWCAP_IDIVA) {
-		emit(ARM_UDIV(rd, rm, rn), ctx);
+		if (bpf_op == BPF_DIV)
+			emit(ARM_UDIV(rd, rm, rn), ctx);
+		else {
+			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
+			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
+		}
 		return;
 	}
 #endif
@@ -496,7 +507,8 @@
 		emit(ARM_MOV_R(ARM_R0, rm), ctx);
 
 	ctx->seen |= SEEN_CALL;
-	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
+	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
+		   ctx);
 	emit_blx_r(ARM_R3, ctx);
 
 	if (rd != ARM_R0)
@@ -614,6 +626,7 @@
 		case BPF_LD | BPF_B | BPF_IND:
 			load_order = 0;
 load_ind:
+			update_on_xread(ctx);
 			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
 			goto load_common;
 		case BPF_LDX | BPF_IMM:
@@ -697,13 +710,27 @@
 			if (k == 1)
 				break;
 			emit_mov_i(r_scratch, k, ctx);
-			emit_udiv(r_A, r_A, r_scratch, ctx);
+			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
 			break;
 		case BPF_ALU | BPF_DIV | BPF_X:
 			update_on_xread(ctx);
 			emit(ARM_CMP_I(r_X, 0), ctx);
 			emit_err_ret(ARM_COND_EQ, ctx);
-			emit_udiv(r_A, r_A, r_X, ctx);
+			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_K:
+			if (k == 1) {
+				emit_mov_i(r_A, 0, ctx);
+				break;
+			}
+			emit_mov_i(r_scratch, k, ctx);
+			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
+			break;
+		case BPF_ALU | BPF_MOD | BPF_X:
+			update_on_xread(ctx);
+			emit(ARM_CMP_I(r_X, 0), ctx);
+			emit_err_ret(ARM_COND_EQ, ctx);
+			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
 			break;
 		case BPF_ALU | BPF_OR | BPF_K:
 			/* A |= K */
@@ -1047,7 +1074,7 @@
 
 	set_memory_ro((unsigned long)header, header->pages);
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 out:
 	kfree(ctx.offsets);
 	return;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 4b17d5ab..c46fca2 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -115,6 +115,8 @@
 
 #define ARM_INST_UMULL		0x00800090
 
+#define ARM_INST_MLS		0x00600090
+
 /*
  * Use a suitable undefined instruction to use for ARM/Thumb2 faulting.
  * We need to be careful not to conflict with those used by other modules
@@ -210,4 +212,7 @@
 #define ARM_UMULL(rd_lo, rd_hi, rn, rm)	(ARM_INST_UMULL | (rd_hi) << 16 \
 					 | (rd_lo) << 12 | (rm) << 8 | rn)
 
+#define ARM_MLS(rd, rn, rm, ra)	(ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \
+				 | (ra) << 12)
+
 #endif /* PFILTER_OPCODES_ARM_H */
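
On ARMv7 cores with hardware divide (HWCAP_IDIVA), the new BPF_MOD path derives the remainder from the quotient with one MLS instruction. A userspace sketch of the identity the emitted UDIV/MLS pair relies on (illustrative, not the JIT itself):

#include <assert.h>
#include <stdint.h>

/* UDIV r3, rm, rn    ->  q  = rm / rn
 * MLS  rd, rn, r3, rm -> rd = rm - rn * q
 */
static uint32_t mod_sketch(uint32_t rm, uint32_t rn)
{
	uint32_t q = rm / rn;

	return rm - rn * q;
}

int main(void)
{
	assert(mod_sketch(17, 5) == 2);
	return 0;
}
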
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529c..daa1a65 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@
 	{ .compatible = "mvrl,pxa168-ssp",	.data = (void *) PXA168_SSP },
 	{ .compatible = "mrvl,pxa910-ssp",	.data = (void *) PXA910_SSP },
 	{ .compatible = "mrvl,ce4100-ssp",	.data = (void *) CE4100_SSP },
-	{ .compatible = "mrvl,lpss-ssp",	.data = (void *) LPSS_SSP },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index f9914d7..d10b5d4 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -42,7 +42,7 @@
 CHECKFLAGS	+= -D__aarch64__
 
 ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
-CFLAGS_MODULE	+= -mcmodel=large
+KBUILD_CFLAGS_MODULE	+= -mcmodel=large
 endif
 
 # Default value
diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
index 3500586..606dd5a 100644
--- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
@@ -13,14 +13,12 @@
 		reg = <0x0 0x803c0000 0x0 0x10000
 		       0x0 0x80000000 0x0 0x10000>;
 
-		soc0_phy4: ethernet-phy@4 {
+		soc0_phy0: ethernet-phy@0 {
 			reg = <0x0>;
-			device_type = "ethernet-phy";
 			compatible = "ethernet-phy-ieee802.3-c22";
 		};
-		soc0_phy5: ethernet-phy@5 {
+		soc0_phy1: ethernet-phy@1 {
 			reg = <0x1>;
-			device_type = "ethernet-phy";
 			compatible = "ethernet-phy-ieee802.3-c22";
 		};
 	};
@@ -37,7 +35,7 @@
 		       0x0 0xc7000000 0x0 0x60000
 		       >;
 
-		phy-handle = <0 0 0 0 &soc0_phy4 &soc0_phy5 0 0>;
+		phy-handle = <0 0 0 0 &soc0_phy0 &soc0_phy1 0 0>;
 		interrupts = <
 			/* [14] ge fifo err 8 / xge 6**/
 			149 0x4 150 0x4 151 0x4 152 0x4
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b0329be..26b0666 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@
 #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -496,7 +496,7 @@
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
 		pte = pte_mkdirty(pte);
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 3bc498c..41e58fe 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush	(__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls		(__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls		388
+#define __NR_compat_syscalls		390
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index cef934a..5b925b7 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -797,3 +797,12 @@
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 387
 __SYSCALL(__NR_execveat, compat_sys_execveat)
+#define __NR_userfaultfd 388
+__SYSCALL(__NR_userfaultfd, sys_userfaultfd)
+#define __NR_membarrier 389
+__SYSCALL(__NR_membarrier, sys_membarrier)
+
+/*
+ * Please add new compat syscalls above this comment and update
+ * __NR_compat_syscalls in asm/unistd.h.
+ */
diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h
index 8d1e723..991bf5d 100644
--- a/arch/arm64/include/uapi/asm/signal.h
+++ b/arch/arm64/include/uapi/asm/signal.h
@@ -19,6 +19,9 @@
 /* Required for AArch32 compatibility. */
 #define SA_RESTORER	0x04000000
 
+#define MINSIGSTKSZ 5120
+#define SIGSTKSZ    16384
+
 #include <asm-generic/signal.h>
 
 #endif
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cebf786..253021e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -201,7 +201,7 @@
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-	write_lock(&break_hook_lock);
-	list_add(&hook->node, &break_hook);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_add_rcu(&hook->node, &break_hook);
+	spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-	write_lock(&break_hook_lock);
-	list_del(&hook->node);
-	write_unlock(&break_hook_lock);
+	spin_lock(&break_hook_lock);
+	list_del_rcu(&hook->node);
+	spin_unlock(&break_hook_lock);
+	synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@
 	struct break_hook *hook;
 	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-	read_lock(&break_hook_lock);
-	list_for_each_entry(hook, &break_hook, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(hook, &break_hook, node)
 		if ((esr & hook->esr_mask) == hook->esr_val)
 			fn = hook->fn;
-	read_unlock(&break_hook_lock);
+	rcu_read_unlock();
 
 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
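
The locking change above is the standard RCU list recipe: writers mutate under a plain spinlock using the _rcu list helpers, readers traverse under rcu_read_lock(), and unregistration calls synchronize_rcu() so a removed hook cannot be freed while a reader still sees it. A condensed kernel-context sketch of the same shape (names taken from the patch):

static LIST_HEAD(hooks);
static DEFINE_SPINLOCK(hooks_lock);

void add_hook(struct break_hook *hook)
{
	spin_lock(&hooks_lock);
	list_add_rcu(&hook->node, &hooks);	/* publish to readers */
	spin_unlock(&hooks_lock);
}

void del_hook(struct break_hook *hook)
{
	spin_lock(&hooks_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&hooks_lock);
	synchronize_rcu();			/* wait out in-flight readers */
}
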
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6ea..13671a9 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@
 		 */
 		if (!is_normal_ram(md))
 			prot = __pgprot(PROT_DEVICE_nGnRE);
-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+			 !PAGE_ALIGNED(md->phys_addr))
 			prot = PAGE_KERNEL_EXEC;
 		else
 			prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc5..0f03a8f 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@
 ENDPROC(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* save return value regs */
+	.macro save_return_regs
+	sub sp, sp, #64
+	stp x0, x1, [sp]
+	stp x2, x3, [sp, #16]
+	stp x4, x5, [sp, #32]
+	stp x6, x7, [sp, #48]
+	.endm
+
+	/* restore return value regs */
+	.macro restore_return_regs
+	ldp x0, x1, [sp]
+	ldp x2, x3, [sp, #16]
+	ldp x4, x5, [sp, #32]
+	ldp x6, x7, [sp, #48]
+	add sp, sp, #64
+	.endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-	str	x0, [sp, #-16]!
+	save_return_regs
 	mov	x0, x29			//     parent's fp
 	bl	ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
 	mov	x30, x0			// restore the original return address
-	ldr	x0, [sp], #16
+	restore_return_regs
 	ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index f341866..c08b9ad 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -85,7 +85,7 @@
 		aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@
 	unsigned long flags = 0;
 	int ret;
 
-	spin_lock_irqsave(&patch_lock, flags);
+	raw_spin_lock_irqsave(&patch_lock, flags);
 	waddr = patch_map(addr, FIX_TEXT_POKE0);
 
 	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
 	patch_unmap(FIX_TEXT_POKE0);
-	spin_unlock_irqrestore(&patch_lock, flags);
+	raw_spin_unlock_irqrestore(&patch_lock, flags);
 
 	return ret;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6bab21f..2322479 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -364,6 +364,8 @@
 		to_free = ram_end - orig_start;
 
 	size = orig_end - orig_start;
+	if (!size)
+		return;
 
 	/* initrd needs to be relocated completely inside linear mapping */
 	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index aba9ead..9fadf6d 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -287,6 +287,7 @@
 			 * starvation.
 			 */
 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			mm_flags |= FAULT_FLAG_TRIED;
 			goto retry;
 		}
 	}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c047598..a44e529 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -744,7 +744,7 @@
 
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
-	prog->jited = true;
+	prog->jited = 1;
 out:
 	kfree(ctx.offset);
 }
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index f61f2dd..241b9b9 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -20,4 +20,5 @@
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 61cd1e7..91d49c0 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -46,4 +46,5 @@
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index f17c4dc..945544e 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -59,4 +59,5 @@
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index b7f6819..1778805 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -43,4 +43,5 @@
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index 8e47b83..1fa084c 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -7,3 +7,4 @@
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 70e6ae1..373cb23 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -73,4 +73,5 @@
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index daee37b..db8ddab 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -58,4 +58,5 @@
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 9de3ba1..502a91d 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -8,3 +8,4 @@
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index e0eb704..fd104bd 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -9,3 +9,4 @@
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 0b6b40d..5b4ec54 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index eeb3a89..6e5198e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 3a70066..f75600b 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 0586b32..a42d91c 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index ad1dbce..77f4a11 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index b44acac..5a329f7 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8afca37..83c80d2 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index ef00875..6cb42c3 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 387c2bd..c7508c3 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 35355c1..64b71664 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -510,6 +516,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 8442d26..9a4cab7 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 0e1b542..1a2eaac 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -10,6 +10,7 @@
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb..066e74f 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+	__asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+	__asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+	__asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
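
For readers unfamiliar with these macros: callers invoke them after computing
the syscall return value so that gcc treats the caller-owned argument slots
as live until the function returns. A hypothetical sketch of the usage
pattern (illustration only, not part of this patch):

	#include <linux/linkage.h>

	/* Hypothetical asmlinkage function with two stack-passed arguments. */
	asmlinkage long sys_example(long a, long b)
	{
		long ret = a + b;

		/* Tie ret, a and b into a dummy asm so gcc leaves the
		 * caller-owned stack slots alone until we return. */
		asmlinkage_protect(2, ret, a, b);
		return ret;
	}
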
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 244e0db..0793a7f 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		356
+#define NR_syscalls		375
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 61fb6cb..5e6fae6 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -361,5 +361,24 @@
 #define __NR_memfd_create	353
 #define __NR_bpf		354
 #define __NR_execveat		355
+#define __NR_socket		356
+#define __NR_socketpair		357
+#define __NR_bind		358
+#define __NR_connect		359
+#define __NR_listen		360
+#define __NR_accept4		361
+#define __NR_getsockopt		362
+#define __NR_setsockopt		363
+#define __NR_getsockname	364
+#define __NR_getpeername	365
+#define __NR_sendto		366
+#define __NR_sendmsg		367
+#define __NR_recvfrom		368
+#define __NR_recvmsg		369
+#define __NR_shutdown		370
+#define __NR_recvmmsg		371
+#define __NR_sendmmsg		372
+#define __NR_userfaultfd	373
+#define __NR_membarrier		374
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index a0ec430..5dd0e80 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -376,4 +376,22 @@
 	.long sys_memfd_create
 	.long sys_bpf
 	.long sys_execveat		/* 355 */
-
+	.long sys_socket
+	.long sys_socketpair
+	.long sys_bind
+	.long sys_connect
+	.long sys_listen		/* 360 */
+	.long sys_accept4
+	.long sys_getsockopt
+	.long sys_setsockopt
+	.long sys_getsockname
+	.long sys_getpeername		/* 365 */
+	.long sys_sendto
+	.long sys_sendmsg
+	.long sys_recvfrom
+	.long sys_recvmsg
+	.long sys_shutdown		/* 370 */
+	.long sys_recvmmsg
+	.long sys_sendmmsg
+	.long sys_userfaultfd
+	.long sys_membarrier
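
With the table entries above wired up, the new direct socket syscalls become
reachable from userspace by number. A minimal test, assuming the m68k
numbering added in this patch (__NR_socket == 356) and a libc whose headers
already carry it:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Call sys_socket directly instead of via socketcall(). */
		long fd = syscall(__NR_socket, AF_INET, SOCK_DGRAM, 0);

		printf("socket fd: %ld\n", fd);
		return fd < 0;
	}
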
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index cd38f29..2290c0c 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -29,6 +29,7 @@
 
 int psc_present;
 volatile __u8 *psc;
+EXPORT_SYMBOL_GPL(psc);
 
 /*
  * Debugging dump, used in various places to see what's going on.
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index df31353..29acb89d 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -54,4 +54,5 @@
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 2f222f3..b0ae88c 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -10,3 +10,4 @@
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 15ecb48..eeb3953 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -293,8 +293,26 @@
 
 	return 0;
 }
-IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
-		ath79_misc_intc_of_init);
+
+static int __init ar7100_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+		ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+	struct device_node *node, struct device_node *parent)
+{
+	ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+	return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+		ar7240_misc_intc_of_init);
 
 static int __init ar79_cpu_intc_of_init(
 	struct device_node *node, struct device_node *parent)
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 51ed599..e970fd9 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -4,6 +4,7 @@
 	bool "SSB Support for Broadcom BCM47XX"
 	select SYS_HAS_CPU_BMIPS32_3300
 	select SSB
+	select SSB_HOST_SOC
 	select SSB_DRIVER_MIPS
 	select SSB_DRIVER_EXTIF
 	select SSB_EMBEDDED
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 89a6284..bd63425 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -933,7 +933,7 @@
 	while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
 		&& (total < MAX_MEMORY)) {
 		memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
-						__pa_symbol(&__init_end), -1,
+						__pa_symbol(&_end), -1,
 						0x100000,
 						CVMX_BOOTMEM_FLAG_NO_LOCKING);
 		if (memory >= 0) {
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 40ec4ca..c7fe4d0 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -17,4 +17,5 @@
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 9801ac9..fe67f12 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -20,6 +20,9 @@
 #ifndef cpu_has_tlb
 #define cpu_has_tlb		(cpu_data[0].options & MIPS_CPU_TLB)
 #endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb		(cpu_data[0].options & MIPS_CPU_FTLB)
+#endif
 #ifndef cpu_has_tlbinv
 #define cpu_has_tlbinv		(cpu_data[0].options & MIPS_CPU_TLBINV)
 #endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index cd89e98..82ad15f 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -385,6 +385,7 @@
 #define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
 #define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
+#define MIPS_CPU_FTLB		0x20000000000ull /* CPU has Fixed-page-size TLB */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 9e777cd..d10fd80 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -256,6 +256,7 @@
  */
 #define ioremap_nocache(offset, size)					\
 	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
 /*
  * ioremap_cachable -	map bus memory into CPU space
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index b02891f..21d9607 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -66,6 +66,15 @@
 }
 
 /**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platform's implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
+/**
  * struct maar_config - MAAR configuration data
  * @lower:	The lowest address that the MAAR pair will affect. Must be
  *		aligned to a 2^16 byte boundary.
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index d75b75e..1f1927a 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -194,6 +194,7 @@
 BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0)
 BUILD_CM_RW(l2_config,		MIPS_CM_GCB_OFS + 0x130)
+BUILD_CM_RW(sys_config2,	MIPS_CM_GCB_OFS + 0x150)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -316,6 +317,10 @@
 #define CM_GCR_L2_CONFIG_ASSOC_SHF		0
 #define CM_GCR_L2_CONFIG_ASSOC_MSK		(_ULCAST_(0xff) << 0)
 
+/* GCR_SYS_CONFIG2 register fields */
+#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF		0
+#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK		(_ULCAST_(0xf) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
@@ -405,4 +410,38 @@
 	return read_gcr_rev();
 }
 
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+	extern int smp_num_siblings;
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+
+	return smp_num_siblings;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+	unsigned int core = cpu_data[cpu].core;
+	unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+	return (core * mips_cm_max_vp_width()) + vp;
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
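
mips_cm_vp_id() simply linearises (core, VP) pairs: with an assumed width of
2 VPs per core, VP 1 of core 1 maps to hardware VP ID 1 * 2 + 1 = 3. The
arithmetic, reduced to a standalone sketch:

	#include <assert.h>

	/* Model of mips_cm_vp_id(); max_vp_width stands in for whatever
	 * mips_cm_max_vp_width() would return (assumed to be 2 here). */
	static unsigned int vp_id(unsigned int core, unsigned int vp,
				  unsigned int max_vp_width)
	{
		return (core * max_vp_width) + vp;
	}

	int main(void)
	{
		assert(vp_id(0, 0, 2) == 0);
		assert(vp_id(0, 1, 2) == 1);
		assert(vp_id(1, 0, 2) == 2);
		assert(vp_id(1, 1, 2) == 3);
		return 0;
	}
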
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index d3cd8ea..c64781c 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -487,6 +487,8 @@
 
 /* Bits specific to the MIPS32/64 PRA.	*/
 #define MIPS_CONF_MT		(_ULCAST_(7) <<	 7)
+#define MIPS_CONF_MT_TLB	(_ULCAST_(1) <<  7)
+#define MIPS_CONF_MT_FTLB	(_ULCAST_(4) <<  7)
 #define MIPS_CONF_AR		(_ULCAST_(7) << 10)
 #define MIPS_CONF_AT		(_ULCAST_(3) << 13)
 #define MIPS_CONF_M		(_ULCAST_(1) << 31)
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index c4ddc4f..23cd9b1 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -13,16 +13,15 @@
 
 #define __SWAB_64_THRU_32__
 
-#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||		\
-    defined(_MIPS_ARCH_LOONGSON3A)
+#if !defined(__mips16) &&					\
+	((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||	\
+	 defined(_MIPS_ARCH_LOONGSON3A))
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
-	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	.set	pop			\n"
 	: "=r" (x)
@@ -32,13 +31,11 @@
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute__((nomips16)) __attribute_const__
-		__u32 __arch_swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
-	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	rotr	%0, %0, 16		\n"
 	"	.set	pop			\n"
@@ -54,13 +51,11 @@
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute__((nomips16)) __attribute_const__
-		__u64 __arch_swab64(__u64 x)
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips64r2		\n"
-	"	.set	nomips16		\n"
 	"	dsbh	%0, %1			\n"
 	"	dshd	%0, %0			\n"
 	"	.set	pop			\n"
@@ -71,5 +66,5 @@
 }
 #define __arch_swab64 __arch_swab64
 #endif /* __mips64 */
-#endif /* MIPS R2 or newer or Loongson 3A */
+#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */
 #endif /* _ASM_SWAB_H */
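
The wsbh/rotr pair used in the R2 paths byteswaps a 32-bit word in two steps:
wsbh swaps the bytes within each 16-bit half, then the rotate by 16 swaps the
halves. A portable C rendering of what those two instructions compute (a
sketch for illustration, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t swab32(uint32_t x)
	{
		/* wsbh: swap bytes within each halfword */
		uint32_t h = ((x & 0x00ff00ffu) << 8) | ((x & 0xff00ff00u) >> 8);

		/* rotr 16: swap the two halfwords */
		return (h << 16) | (h >> 16);
	}

	int main(void)
	{
		assert(swab32(0x11223344u) == 0x44332211u);
		return 0;
	}
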
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index c03088f..cfabadb 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -377,16 +377,18 @@
 #define __NR_memfd_create		(__NR_Linux + 354)
 #define __NR_bpf			(__NR_Linux + 355)
 #define __NR_execveat			(__NR_Linux + 356)
+#define __NR_userfaultfd		(__NR_Linux + 357)
+#define __NR_membarrier			(__NR_Linux + 358)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		356
+#define __NR_Linux_syscalls		358
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		356
+#define __NR_O32_Linux_syscalls		358
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -711,16 +713,18 @@
 #define __NR_memfd_create		(__NR_Linux + 314)
 #define __NR_bpf			(__NR_Linux + 315)
 #define __NR_execveat			(__NR_Linux + 316)
+#define __NR_userfaultfd		(__NR_Linux + 317)
+#define __NR_membarrier			(__NR_Linux + 318)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		316
+#define __NR_Linux_syscalls		318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		316
+#define __NR_64_Linux_syscalls		318
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1049,15 +1053,17 @@
 #define __NR_memfd_create		(__NR_Linux + 318)
 #define __NR_bpf			(__NR_Linux + 319)
 #define __NR_execveat			(__NR_Linux + 320)
+#define __NR_userfaultfd		(__NR_Linux + 321)
+#define __NR_membarrier			(__NR_Linux + 322)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		320
+#define __NR_Linux_syscalls		322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		320
+#define __NR_N32_Linux_syscalls		322
 
 #endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 4e62bf8..459cb01 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -26,6 +26,7 @@
 #include <linux/power/jz4740-battery.h>
 #include <linux/power/gpio-charger.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_fb.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index a74e181..8c6d76c 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
 
 #define JZ4740_GPIO_BASE_A (32*0)
 #define JZ4740_GPIO_BASE_B (32*1)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 9f71c06..209ded1 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -39,6 +39,7 @@
 	 mfc0	\dest, CP0_CONFIG, 3
 	andi	\dest, \dest, MIPS_CONF3_MT
 	beqz	\dest, \nomt
+	 nop
 	.endm
 
 .section .text.cps-vec
@@ -223,10 +224,9 @@
 	END(excep_ejtag)
 
 LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 	/* Check that the core implements the MT ASE */
 	has_mt	t0, 3f
-	 nop
 
 	.set	push
 	.set	mips64r2
@@ -310,8 +310,9 @@
 	PTR_ADDU t0, t0, t1
 
 	/* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+	li	t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
 	has_mt	ta2, 1f
-	 li	t9, 0
 
 	/* Find the number of VPEs present in the core */
 	mfc0	t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@
 	/* Retrieve the VPE ID from EBase.CPUNum */
 	mfc0	t9, $15, 1
 	and	t9, t9, t1
+#endif
 
 1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
 	li	t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@
 	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
 	PTR_ADDU v0, v0, ta3
 
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 
 	/* If the core doesn't support MT then return */
 	bnez	ta2, 1f
@@ -451,7 +453,7 @@
 
 2:	.set	pop
 
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
 
 	/* Return */
 	jr	ra
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 571a8e6..09a51d0 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -410,16 +410,18 @@
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
 	unsigned int config0;
-	int isa;
+	int isa, mt;
 
 	config0 = read_c0_config();
 
 	/*
 	 * Look for Standard TLB or Dual VTLB and FTLB
 	 */
-	if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
-	    (((config0 & MIPS_CONF_MT) >> 7) == 4))
+	mt = config0 & MIPS_CONF_MT;
+	if (mt == MIPS_CONF_MT_TLB)
 		c->options |= MIPS_CPU_TLB;
+	else if (mt == MIPS_CONF_MT_FTLB)
+		c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
 
 	isa = (config0 & MIPS_CONF_AT) >> 13;
 	switch (isa) {
@@ -559,15 +561,18 @@
 	if (cpu_has_tlb) {
 		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
 			c->options |= MIPS_CPU_TLBINV;
+
 		/*
-		 * This is a bit ugly. R6 has dropped that field from
-		 * config4 and the only valid configuration is VTLB+FTLB so
-		 * set a good value for mmuextdef for that case.
+		 * R6 has dropped the MMUExtDef field from config4.
+		 * On R6 these fields always describe the FTLB, and are valid
+		 * only if an FTLB is present according to Config.MT.
 		 */
-		if (cpu_has_mips_r6)
+		if (!cpu_has_mips_r6)
+			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		else if (cpu_has_ftlb)
 			mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
 		else
-			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+			mmuextdef = 0;
 
 		switch (mmuextdef) {
 		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
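
The new MIPS_CONF_MT_* constants make the Config.MT decode self-documenting:
field value 1 means a standard VTLB only, 4 means a dual VTLB and FTLB. A
user-space sketch of the same decode logic:

	#include <stdio.h>

	#define MIPS_CONF_MT		(7u << 7)
	#define MIPS_CONF_MT_TLB	(1u << 7)
	#define MIPS_CONF_MT_FTLB	(4u << 7)

	static const char *mmu_type(unsigned int config0)
	{
		switch (config0 & MIPS_CONF_MT) {
		case MIPS_CONF_MT_TLB:	return "standard TLB";
		case MIPS_CONF_MT_FTLB:	return "dual VTLB and FTLB";
		default:		return "none/other";
		}
	}

	int main(void)
	{
		printf("%s\n", mmu_type(1u << 7));	/* standard TLB */
		printf("%s\n", mmu_type(4u << 7));	/* dual VTLB and FTLB */
		return 0;
	}
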
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 423ae83..3375745 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -18,7 +18,7 @@
 	.set pop
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *		       struct thread_info *next_ti, int usedfpu)
+ *		       struct thread_info *next_ti)
  */
 	.align	7
 	LEAF(resume)
@@ -28,30 +28,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-	/*
-	 * check if we need to save FPU registers
-	 */
-	.set push
-	.set noreorder
-	beqz	a3, 1f
-	 PTR_L	t3, TASK_THREAD_INFO(a0)
-	.set pop
-
-	/*
-	 * clear saved user stack CU1 bit
-	 */
-	LONG_L	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	LONG_S	t0, ST_OFF(t3)
-
-	.set push
-	.set arch=mips64r2
-	fpu_save_double a0 t0 t1		# c0_status passed in t0
-						# clobbers t1
-	.set pop
-1:
-
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
 	/* Check if we need to store CVMSEG state */
 	dmfc0	t0, $11,7	/* CvmMemCtl */
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 5087a4b..ac27ef7 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -31,18 +31,8 @@
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
 /*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *		       struct thread_info *next_ti, int usedfpu)
+ *		       struct thread_info *next_ti)
  */
 LEAF(resume)
 	mfc0	t1, CP0_STATUS
@@ -50,22 +40,6 @@
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)
 
-	beqz	a3, 1f
-
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-
-	/*
-	 * clear saved user stack CU1 bit
-	 */
-	lw	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	sw	t0, ST_OFF(t3)
-
-	fpu_save_single a0, t0			# clobbers t0
-
-1:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 4cc1350..65a74e4 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -36,16 +36,8 @@
 	lw	t1, PT_EPC(sp)		# skip syscall on return
 
 	subu	v0, v0, __NR_O32_Linux	# check syscall number
-	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
 	addiu	t1, 4			# skip to next instruction
 	sw	t1, PT_EPC(sp)
-	beqz	t0, illegal_syscall
-
-	sll	t0, v0, 2
-	la	t1, sys_call_table
-	addu	t1, t0
-	lw	t2, (t1)		# syscall routine
-	beqz	t2, illegal_syscall
 
 	sw	a3, PT_R26(sp)		# save a3 for syscall restarting
 
@@ -96,6 +88,16 @@
 	li	t1, _TIF_WORK_SYSCALL_ENTRY
 	and	t0, t1
 	bnez	t0, syscall_trace_entry # -> yes
+syscall_common:
+	sltiu	t0, v0, __NR_O32_Linux_syscalls + 1
+	beqz	t0, illegal_syscall
+
+	sll	t0, v0, 2
+	la	t1, sys_call_table
+	addu	t1, t0
+	lw	t2, (t1)		# syscall routine
+
+	beqz	t2, illegal_syscall
 
 	jalr	t2			# Do The Real Thing (TM)
 
@@ -116,7 +118,7 @@
 
 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, t2
+	move	s0, v0
 	move	a0, sp
 
 	/*
@@ -129,27 +131,18 @@
 
 1:	jal	syscall_trace_enter
 
-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t0, s0
+	move	v0, s0			# restore syscall
+
 	RESTORE_STATIC
 	lw	a0, PT_R4(sp)		# Restore argument registers
 	lw	a1, PT_R5(sp)
 	lw	a2, PT_R6(sp)
 	lw	a3, PT_R7(sp)
-	jalr	t0
+	j	syscall_common
 
-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sw	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	lw	t1, PT_R2(sp)		# syscall number
-	negu	v0			# error
-	sw	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sw	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -599,3 +592,5 @@
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 4355 */
 	PTR	sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a6f6b76..e732981 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -39,18 +39,11 @@
 	.set	at
 #endif
 
-	dsubu	t0, v0, __NR_64_Linux	# check syscall number
-	sltiu	t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
 	ld	t1, PT_EPC(sp)		# skip syscall on return
 	daddiu	t1, 4			# skip to next instruction
 	sd	t1, PT_EPC(sp)
 #endif
-	beqz	t0, illegal_syscall
-
-	dsll	t0, v0, 3		# offset into table
-	ld	t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
-					# syscall routine
 
 	sd	a3, PT_R26(sp)		# save a3 for syscall restarting
 
@@ -59,6 +52,17 @@
 	and	t0, t1, t0
 	bnez	t0, syscall_trace_entry
 
+syscall_common:
+	dsubu	t2, v0, __NR_64_Linux
+	sltiu   t0, t2, __NR_64_Linux_syscalls + 1
+	beqz	t0, illegal_syscall
+
+	dsll	t0, t2, 3		# offset into table
+	dla	t2, sys_call_table
+	daddu	t0, t2, t0
+	ld	t2, (t0)		# syscall routine
+	beqz	t2, illegal_syscall
+
 	jalr	t2			# Do The Real Thing (TM)
 
 	li	t0, -EMAXERRNO - 1	# error?
@@ -78,14 +82,14 @@
 
 syscall_trace_entry:
 	SAVE_STATIC
-	move	s0, t2
+	move	s0, v0
 	move	a0, sp
 	move	a1, v0
 	jal	syscall_trace_enter
 
-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t0, s0
+	move	v0, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -93,19 +97,9 @@
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
-	jalr	t0
+	j	syscall_common
 
-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit
 
 illegal_syscall:
 	/* This isn't a 64-bit syscall either; throw an error.  */
@@ -436,4 +430,6 @@
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 5315 */
 	PTR	sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 4b20106..c794843 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -52,6 +52,7 @@
 	and	t0, t1, t0
 	bnez	t0, n32_syscall_trace_entry
 
+syscall_common:
 	jalr	t2			# Do The Real Thing (TM)
 
 	li	t0, -EMAXERRNO - 1	# error?
@@ -75,9 +76,9 @@
 	move	a1, v0
 	jal	syscall_trace_enter
 
-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t0, s0
+	move	t2, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -85,19 +86,9 @@
 	ld	a3, PT_R7(sp)
 	ld	a4, PT_R8(sp)
 	ld	a5, PT_R9(sp)
-	jalr	t0
+	j	syscall_common
 
-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit
 
 not_n32_scall:
 	/* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@
 	PTR	sys_memfd_create
 	PTR	sys_bpf
 	PTR	compat_sys_execveat		/* 6320 */
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f543ff4..6369cfd 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -87,6 +87,7 @@
 	and	t0, t1, t0
 	bnez	t0, trace_a_syscall
 
+syscall_common:
 	jalr	t2			# Do The Real Thing (TM)
 
 	li	t0, -EMAXERRNO - 1	# error?
@@ -130,9 +131,9 @@
 
 1:	jal	syscall_trace_enter
 
-	bltz	v0, 2f			# seccomp failed? Skip syscall
+	bltz	v0, 1f			# seccomp failed? Skip syscall
 
-	move	t0, s0
+	move	t2, s0
 	RESTORE_STATIC
 	ld	a0, PT_R4(sp)		# Restore argument registers
 	ld	a1, PT_R5(sp)
@@ -142,19 +143,9 @@
 	ld	a5, PT_R9(sp)
 	ld	a6, PT_R10(sp)
 	ld	a7, PT_R11(sp)		# For indirect syscalls
-	jalr	t0
+	j	syscall_common
 
-	li	t0, -EMAXERRNO - 1	# error?
-	sltu	t0, t0, v0
-	sd	t0, PT_R7(sp)		# set error flag
-	beqz	t0, 1f
-
-	ld	t1, PT_R2(sp)		# syscall number
-	dnegu	v0			# error
-	sd	t1, PT_R0(sp)		# save it for syscall restarting
-1:	sd	v0, PT_R2(sp)		# result
-
-2:	j	syscall_exit
+1:	j	syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -584,4 +575,6 @@
 	PTR	sys_memfd_create
 	PTR	sys_bpf				/* 4355 */
 	PTR	compat_sys_execveat
+	PTR	sys_userfaultfd
+	PTR	sys_membarrier
 	.size	sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 35b8316..4795151 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -338,7 +338,7 @@
 		if (end <= reserved_end)
 			continue;
 #ifdef CONFIG_BLK_DEV_INITRD
-		/* mapstart should be after initrd_end */
+		/* Skip zones before initrd and initrd itself */
 		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
 			continue;
 #endif
@@ -371,6 +371,14 @@
 		max_low_pfn = PFN_DOWN(HIGHMEM_START);
 	}
 
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * mapstart should be after initrd_end
+	 */
+	if (initrd_end)
+		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
 	/*
 	 * Initialize the boot-time allocator with low memory only.
 	 */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index a31896c..bd4385a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
+#include <asm/maar.h>
 
 cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */
 
@@ -157,6 +158,7 @@
 	mips_clockevent_init();
 	mp_ops->init_secondary();
 	cpu_report();
+	maar_init();
 
 	/*
 	 * XXX parity protection should be folded in here when it's converted
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index f6c44dd..d6d07ad 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -64,6 +64,9 @@
 	}
 	if (memsize == 0)
 		memsize = 256;
+
+	loongson_sysconf.nr_uarts = 1;
+
 	pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
 #else
 	struct boot_params *boot_p;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index a914dc1..d8117be 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@
 	else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+	     if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
 		dma_flag = __GFP_DMA;
 	else
 #endif
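
The effect of the dma-default.c change, in isolation: with a 32-bit
phys_addr_t, a device whose coherent mask covers all of physical memory is no
longer forced into ZONE_DMA, because the comparison is now against the real
physical address width instead of a hard-coded 64 bits. A small demonstration
(DMA_BIT_MASK copied from linux/dma-mapping.h; the 32-bit phys_addr_t is an
assumption):

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

	typedef uint32_t phys_addr_t;	/* assume 32-bit physical addresses */

	int main(void)
	{
		unsigned long long coherent_dma_mask = DMA_BIT_MASK(32);

		/* old test: true for any mask below 64 bits -> forced GFP_DMA */
		printf("old: %d\n", coherent_dma_mask < DMA_BIT_MASK(64));

		/* new test: false once the mask covers all physical memory */
		printf("new: %d\n", coherent_dma_mask <
				    DMA_BIT_MASK(sizeof(phys_addr_t) * 8));
		return 0;
	}
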
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 66d0f49..8770e61 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -44,6 +44,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/maar.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -252,6 +253,119 @@
 #endif
 }
 
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
+}
+
+void maar_init(void)
+{
+	unsigned num_maars, used, i;
+	phys_addr_t lower, upper, attr;
+	static struct {
+		struct maar_config cfgs[3];
+		unsigned used;
+	} recorded = { { { 0 } }, 0 };
+
+	if (!cpu_has_maar)
+		return;
+
+	/* Detect the number of MAARs */
+	write_c0_maari(~0);
+	back_to_back_c0_hazard();
+	num_maars = read_c0_maari() + 1;
+
+	/* MAARs should be in pairs */
+	WARN_ON(num_maars % 2);
+
+	/* Set MAARs using values we recorded already */
+	if (recorded.used) {
+		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+		BUG_ON(used != recorded.used);
+	} else {
+		/* Configure the required MAARs */
+		used = platform_maar_init(num_maars / 2);
+	}
+
+	/* Disable any further MAARs */
+	for (i = (used * 2); i < num_maars; i++) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		write_c0_maar(0);
+		back_to_back_c0_hazard();
+	}
+
+	if (recorded.used)
+		return;
+
+	pr_info("MAAR configuration:\n");
+	for (i = 0; i < num_maars; i += 2) {
+		write_c0_maari(i);
+		back_to_back_c0_hazard();
+		upper = read_c0_maar();
+
+		write_c0_maari(i + 1);
+		back_to_back_c0_hazard();
+		lower = read_c0_maar();
+
+		attr = lower & upper;
+		lower = (lower & MIPS_MAAR_ADDR) << 4;
+		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+		pr_info("  [%d]: ", i / 2);
+		if (!(attr & MIPS_MAAR_V)) {
+			pr_cont("disabled\n");
+			continue;
+		}
+
+		pr_cont("%pa-%pa", &lower, &upper);
+
+		if (attr & MIPS_MAAR_S)
+			pr_cont(" speculate");
+
+		pr_cont("\n");
+
+		/* Record the setup for use on secondary CPUs */
+		if (used <= ARRAY_SIZE(recorded.cfgs)) {
+			recorded.cfgs[recorded.used].lower = lower;
+			recorded.cfgs[recorded.used].upper = upper;
+			recorded.cfgs[recorded.used].attrs = attr;
+			recorded.used++;
+		}
+	}
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 int page_is_ram(unsigned long pagenr)
 {
@@ -334,69 +448,6 @@
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
-{
-	struct maar_config cfg[BOOT_MEM_MAP_MAX];
-	unsigned i, num_configured, num_cfg = 0;
-	phys_addr_t skip;
-
-	for (i = 0; i < boot_mem_map.nr_map; i++) {
-		switch (boot_mem_map.map[i].type) {
-		case BOOT_MEM_RAM:
-		case BOOT_MEM_INIT_RAM:
-			break;
-		default:
-			continue;
-		}
-
-		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
-		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower += skip;
-
-		cfg[num_cfg].upper = cfg[num_cfg].lower;
-		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-		cfg[num_cfg].upper -= skip;
-
-		cfg[num_cfg].attrs = MIPS_MAAR_S;
-		num_cfg++;
-	}
-
-	num_configured = maar_config(cfg, num_cfg, num_pairs);
-	if (num_configured < num_cfg)
-		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-			num_pairs, num_cfg);
-
-	return num_configured;
-}
-
-static void maar_init(void)
-{
-	unsigned num_maars, used, i;
-
-	if (!cpu_has_maar)
-		return;
-
-	/* Detect the number of MAARs */
-	write_c0_maari(~0);
-	back_to_back_c0_hazard();
-	num_maars = read_c0_maari() + 1;
-
-	/* MAARs should be in pairs */
-	WARN_ON(num_maars % 2);
-
-	/* Configure the required MAARs */
-	used = platform_maar_init(num_maars / 2);
-
-	/* Disable any further MAARs */
-	for (i = (used * 2); i < num_maars; i++) {
-		write_c0_maari(i);
-		back_to_back_c0_hazard();
-		write_c0_maar(0);
-		back_to_back_c0_hazard();
-	}
-}
-
 void __init mem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 0c4a133..77cb273 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1251,7 +1251,7 @@
 		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
 	fp->bpf_func = (void *)ctx.target;
-	fp->jited = true;
+	fp->jited = 1;
 
 out:
 	kfree(ctx.offsets);
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index e927260..5d2e0c8 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -57,15 +57,28 @@
 
 LEAF(sk_load_word)
 	is_offset_negative(word)
-	.globl sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
 	is_offset_in_header(4, word)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
+	.set	reorder
 	lw	$r_A, 0(t1)
+	.set	noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
 	rotr	$r_A, t0, 16
+# else
+	sll	t0, $r_A, 24
+	srl	t1, $r_A, 24
+	srl	t2, $r_A, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_A, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
+	or	$r_A, t0, t1
+# endif
 #endif
 	jr	$r_ra
 	 move	$r_ret, zero
@@ -73,15 +86,24 @@
 
 LEAF(sk_load_half)
 	is_offset_negative(half)
-	.globl sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
 	is_offset_in_header(2, half)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
+	.set	reorder
 	lh	$r_A, 0(t1)
+	.set	noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_A
 	seh	$r_A, t0
+# else
+	sll	t0, $r_A, 24
+	andi	t1, $r_A, 0xff00
+	sra	t0, t0, 16
+	srl	t1, t1, 8
+	or	$r_A, t0, t1
+# endif
 #endif
 	jr	$r_ra
 	 move	$r_ret, zero
@@ -89,8 +111,7 @@
 
 LEAF(sk_load_byte)
 	is_offset_negative(byte)
-	.globl sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
 	is_offset_in_header(1, byte)
 	/* Offset within header boundaries */
 	PTR_ADDU t1, $r_skb_data, offset
@@ -148,23 +169,47 @@
 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
 	bpf_slow_path_common(4)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	wsbh	t0, $r_s0
 	jr	$r_ra
 	 rotr	$r_A, t0, 16
-#endif
+# else
+	sll	t0, $r_s0, 24
+	srl	t1, $r_s0, 24
+	srl	t2, $r_s0, 8
+	or	t0, t0, t1
+	andi	t2, t2, 0xff00
+	andi	t1, $r_s0, 0xff00
+	or	t0, t0, t2
+	sll	t1, t1, 8
 	jr	$r_ra
-	move	$r_A, $r_s0
+	 or	$r_A, t0, t1
+# endif
+#else
+	jr	$r_ra
+	 move	$r_A, $r_s0
+#endif
 
 	END(bpf_slow_path_word)
 
 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
 	bpf_slow_path_common(2)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
 	jr	$r_ra
 	 wsbh	$r_A, $r_s0
-#endif
+# else
+	sll	t0, $r_s0, 8
+	andi	t1, $r_s0, 0xff00
+	andi	t0, t0, 0xff00
+	srl	t1, t1, 8
+	jr	$r_ra
+	 or	$r_A, t0, t1
+# endif
+#else
 	jr	$r_ra
 	 move	$r_A, $r_s0
+#endif
 
 	END(bpf_slow_path_half)
 
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 6edb9ee..1c8dd0f 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -9,3 +9,4 @@
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 914864e..d63330e 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -61,4 +61,5 @@
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index 426bf41..5f51b7b 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -224,10 +224,12 @@
 
 /include/ "pq3-etsec2-0.dtsi"
 	enet0: enet0_grp2: ethernet@b0000 {
+		fsl,wake-on-filer;
 	};
 
 /include/ "pq3-etsec2-1.dtsi"
 	enet1: enet1_grp2: ethernet@b1000 {
+		fsl,wake-on-filer;
 	};
 
 	global-utilities@e0000 {
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6bc0ee4..2c041b5 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -111,7 +111,7 @@
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 7991f37..36871a4 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -114,7 +114,7 @@
 CONFIG_SCSI_QLA_ISCSI=m
 CONFIG_SCSI_LPFC=m
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_ALUA=m
 CONFIG_ATA=y
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index cab6753..3f191f5 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -61,8 +61,13 @@
 					       unsigned long addr,
 					       unsigned char *hpte_slot_array,
 					       int psize, int ssize, int local);
-	/* special for kexec, to be called in real mode, linear mapping is
-	 * destroyed as well */
+	/*
+	 * Special for kexec.
+	 * To be called in real mode with interrupts disabled. No locks are
+	 * taken, so concurrent access on pre-POWER5 hardware could result
+	 * in a deadlock.
+	 * The linear mapping is destroyed as well.
+	 */
 	void		(*hpte_clear_all)(void);
 
 	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index 5b3a903..e4396a7 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -40,6 +40,11 @@
 	return (val + c->high_bits) & ~rhs;
 }
 
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+	return ~1ul << __fls(mask);
+}
+
 #else
 
 #ifdef CONFIG_64BIT
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 13befa35..c8822af 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -582,13 +582,21 @@
  * be when they isi), and we are the only one left.  We rely on our kernel
  * mapping being 0xC0's and the hardware ignoring those two real bits.
  *
+ * This must be called with interrupts disabled.
+ *
+ * Taking the native_tlbie_lock is unsafe here due to the possibility of
+ * lockdep being on. On pre-POWER5 hardware, not taking the lock could
+ * cause a deadlock; on POWER5 and newer, not taking it is fine. This
+ * only gets called during boot before secondary CPUs have come up, and
+ * during crashdump, when all bets are off anyway.
+ *
  * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
  */
 static void native_hpte_clear(void)
 {
 	unsigned long vpn = 0;
-	unsigned long slot, slots, flags;
+	unsigned long slot, slots;
 	struct hash_pte *hptep = htab_address;
 	unsigned long hpte_v;
 	unsigned long pteg_count;
@@ -596,13 +604,6 @@
 
 	pteg_count = htab_hash_mask + 1;
 
-	local_irq_save(flags);
-
-	/* we take the tlbie lock and hold it.  Some hardware will
-	 * deadlock if we try to tlbie from two processors at once.
-	 */
-	raw_spin_lock(&native_tlbie_lock);
-
 	slots = pteg_count * HPTES_PER_GROUP;
 
 	for (slot = 0; slot < slots; slot++, hptep++) {
@@ -614,8 +615,8 @@
 		hpte_v = be64_to_cpu(hptep->v);
 
 		/*
-		 * Call __tlbie() here rather than tlbie() since we
-		 * already hold the native_tlbie_lock.
+		 * Call __tlbie() here rather than tlbie() since we can't take the
+		 * native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
 			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
@@ -625,8 +626,6 @@
 	}
 
 	asm volatile("eieio; tlbsync; ptesync":::"memory");
-	raw_spin_unlock(&native_tlbie_lock);
-	local_irq_restore(flags);
 }
 
 /*
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 17cea18..0478216 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -679,7 +679,7 @@
 		((u64 *)image)[1] = local_paca->kernel_toc;
 #endif
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 230f3a7..4296d55 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -487,9 +487,12 @@
 	 *    PRD component would have already got notified about this
 	 *    error through other channels.
 	 *
-	 * In any case, let us just fall through. We anyway heading
-	 * down to panic path.
+	 * If hardware marked this as an unrecoverable MCE, we are
+	 * going to panic anyway. Even if it didn't, it's not safe to
+	 * continue at this point, so we should explicitly panic.
 	 */
+
+	panic("PowerNV Unrecovered Machine Check");
 	return 0;
 }
 
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 0978713..3db53e8 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -194,11 +194,6 @@
 	.key = OS_AREA_DB_KEY_RTC_DIFF
 };
 
-static const struct os_area_db_id os_area_db_id_video_mode = {
-	.owner = OS_AREA_DB_OWNER_LINUX,
-	.key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
 #define SECONDS_FROM_1970_TO_2000 946684800LL
 
 /**
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d478811..fac6ac9 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0c98f15..ed7da28 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -381,7 +381,7 @@
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 82083e1..9858b14 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -377,7 +377,7 @@
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index c05c9e0..7f14f80 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -377,7 +377,7 @@
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 5ad26dd..9043d2e 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -6,3 +6,4 @@
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
index 2a0efc6..dc19ee0 100644
--- a/arch/s390/include/asm/numa.h
+++ b/arch/s390/include/asm/numa.h
@@ -19,7 +19,7 @@
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 27ebde6..94fc55f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -68,7 +68,7 @@
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-	return node_to_cpumask_map[node];
+	return &node_to_cpumask_map[node];
 }
 
 /*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 48c9af7..3aeeb1b 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -176,6 +176,7 @@
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+	DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 09b039d..582fe44 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -733,6 +733,14 @@
 	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,.Lpsw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+	larl	%r1,smp_cpu_mtid
+	llgf	%r1,0(%r1)
+	ltgr	%r1,%r1
+	jz	.Lpsw_idle_stcctm
+	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
 	STCK	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@
 	jhe	1f
 	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
 	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:	# account system time going idle
+1:	# calculate idle cycles
+#ifdef CONFIG_SMP
+	clg	%r9,BASED(.Lcleanup_idle_insn)
+	jl	3f
+	larl	%r1,smp_cpu_mtid
+	llgf	%r1,0(%r1)
+	ltgr	%r1,%r1
+	jz	3f
+	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+	larl	%r3,mt_cycles
+	ag	%r3,__LC_PERCPU_OFFSET
+	la	%r4,__SF_EMPTY+16(%r15)
+2:	lg	%r0,0(%r3)
+	slg	%r0,0(%r4)
+	alg	%r0,64(%r4)
+	stg	%r0,0(%r3)
+	la	%r3,8(%r3)
+	la	%r4,8(%r4)
+	brct	%r1,2b
+#endif
+3:	# account system time going idle
 	lg	%r9,__LC_STEAL_TIMER
 	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
 	slg	%r9,__LC_LAST_UPDATE_CLOCK
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c865343..dafc44f 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@
 	return elapsed >= atomic64_read(&virt_timer_current);
 }
 
+static void update_mt_scaling(void)
+{
+	u64 cycles_new[8], *cycles_old;
+	u64 delta, fac, mult, div;
+	int i;
+
+	stcctm5(smp_cpu_mtid + 1, cycles_new);
+	cycles_old = this_cpu_ptr(mt_cycles);
+	fac = 1;
+	mult = div = 0;
+	for (i = 0; i <= smp_cpu_mtid; i++) {
+		delta = cycles_new[i] - cycles_old[i];
+		div += delta;
+		mult *= i + 1;
+		mult += delta * fac;
+		fac *= i + 1;
+	}
+	div *= fac;
+	if (div > 0) {
+		/* Update scaling factor */
+		__this_cpu_write(mt_scaling_mult, mult);
+		__this_cpu_write(mt_scaling_div, div);
+		memcpy(cycles_old, cycles_new,
+		       sizeof(u64) * (smp_cpu_mtid + 1));
+	}
+	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@
 	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, clock, user, system, steal;
 	u64 user_scaled, system_scaled;
-	int i;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-	/* Do MT utilization calculation */
+	/* Update MT utilization calculation */
 	if (smp_cpu_mtid &&
-	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-		u64 cycles_new[32], *cycles_old;
-		u64 delta, fac, mult, div;
-
-		cycles_old = this_cpu_ptr(mt_cycles);
-		if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-			fac = 1;
-			mult = div = 0;
-			for (i = 0; i <= smp_cpu_mtid; i++) {
-				delta = cycles_new[i] - cycles_old[i];
-				div += delta;
-				mult *= i + 1;
-				mult += delta * fac;
-				fac *= i + 1;
-			}
-			div *= fac;
-			if (div > 0) {
-				/* Update scaling factor */
-				__this_cpu_write(mt_scaling_mult, mult);
-				__this_cpu_write(mt_scaling_div, div);
-				memcpy(cycles_old, cycles_new,
-				       sizeof(u64) * (smp_cpu_mtid + 1));
-			}
-		}
-		__this_cpu_write(mt_scaling_jiffies, jiffies_64);
-	}
+	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+		update_mt_scaling();
 
 	user = S390_lowcore.user_timer - ti->user_timer;
 	S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@
 	S390_lowcore.last_update_timer = get_vtimer();
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
+	/* Update MT utilization calculation */
+	if (smp_cpu_mtid &&
+	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+		update_mt_scaling();
+
 	system = S390_lowcore.system_timer - ti->system_timer;
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
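
For reference, tracing the loop in update_mt_scaling() with \delta_i = cycles_new[i] - cycles_old[i] and m = smp_cpu_mtid, the final values reduce to

	mult = \sum_{i=0}^{m} \delta_i \cdot (m+1)! / (i+1)
	div  = (m+1)! \cdot \sum_{i=0}^{m} \delta_i

so mult/div = (\sum_i \delta_i / (i+1)) / (\sum_i \delta_i), a weighted average of 1/(i+1) with the per-thread cycle deltas as weights, kept as an integer fraction because the kernel avoids floating point.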
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index eeda051..9a0c4c2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1310,7 +1310,7 @@
 	if (jit.prg_buf) {
 		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *) jit.prg_buf;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 free_addrs:
 	kfree(jit.addrs);
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
index 7de4e2f..30b2698a 100644
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -368,7 +368,7 @@
 		cpumask_copy(&top->thread_mask, &core->mask);
 		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
 		cpumask_copy(&top->book_mask, &core_book(core)->mask);
-		cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
 		top->node_id = core_node(core)->id;
 	}
 }
@@ -383,7 +383,7 @@
 
 	/* Clear all node masks */
 	for (i = 0; i < MAX_NUMNODES; i++)
-		cpumask_clear(node_to_cpumask_map[i]);
+		cpumask_clear(&node_to_cpumask_map[i]);
 
 	/* Rebuild all masks */
 	toptree_for_each(core, numa, CORE)
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 09b1d23..43f32ce 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@
 static int __init numa_init_early(void)
 {
 	/* Attach all possible CPUs to node 0 for now. */
-	cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+	cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
 	return 0;
 }
 early_initcall(numa_init_early);
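
The cpumask_var_t -> cpumask_t conversion above trades a little static storage for usability before the allocators are up: with CONFIG_CPUMASK_OFFSTACK, a cpumask_var_t must be set up with alloc_cpumask_var() before first use, while a plain array is valid immediately. A minimal sketch of the two forms (illustrative only):

	/* before: pointer-like, needs allocation when masks live off-stack */
	cpumask_var_t mask;
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_possible_mask);

	/* after: plain storage, usable from early boot */
	static cpumask_t node_mask;
	cpumask_copy(&node_mask, cpu_possible_mask);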
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 92ffe39..a05218f 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -13,3 +13,4 @@
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
+generic-y += word-at-a-time.h
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index fe20d14..ceb5201 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -59,6 +59,7 @@
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, void *from);
+#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)
 
 struct page;
 struct vm_area_struct;
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 2e48eb8..c90930d 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -433,6 +433,7 @@
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -452,6 +453,7 @@
 		.blkcipher = {
 			.min_keysize	= AES_MIN_KEY_SIZE,
 			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
 			.setkey		= aes_set_key,
 			.encrypt	= ctr_crypt,
 			.decrypt	= ctr_crypt,
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 6bf2479..561a84d 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -274,6 +274,7 @@
 		.blkcipher = {
 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
+			.ivsize		= CAMELLIA_BLOCK_SIZE,
 			.setkey		= camellia_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index dd6a34f..61af794 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -429,6 +429,7 @@
 		.blkcipher = {
 			.min_keysize	= DES_KEY_SIZE,
 			.max_keysize	= DES_KEY_SIZE,
+			.ivsize		= DES_BLOCK_SIZE,
 			.setkey		= des_set_key,
 			.encrypt	= cbc_encrypt,
 			.decrypt	= cbc_decrypt,
@@ -485,6 +486,7 @@
 		.blkcipher = {
 			.min_keysize	= DES3_EDE_KEY_SIZE,
 			.max_keysize	= DES3_EDE_KEY_SIZE,
+			.ivsize		= DES3_EDE_BLOCK_SIZE,
 			.setkey		= des3_ede_set_key,
 			.encrypt	= cbc3_encrypt,
 			.decrypt	= cbc3_decrypt,
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f8b9f71..22564f5 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -812,7 +812,7 @@
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
 		fp->bpf_func = (void *)image;
-		fp->jited = true;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index ee186e1..f102048 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <gxio/iorpc_globals.h>
 #include <gxio/iorpc_mpipe.h>
@@ -29,32 +30,6 @@
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
 
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough.  To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
-	size_t len = strnlen(src, size) + 1;
-	if (len > size) {
-		if (size)
-			dest[0] = '\0';
-		return 0;
-	}
-	memcpy(dest, src, len);
-	return len;
-}
-
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
 	char file[32];
@@ -540,7 +515,7 @@
 	if (!context)
 		return GXIO_ERR_NO_DEVICE;
 
-	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
 		return GXIO_ERR_NO_DEVICE;
 
 	return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@
 
 	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
 	if (rv >= 0) {
-		if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+		if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
 			return GXIO_ERR_INVAL_MEMORY_SIZE;
 		memcpy(link_mac, mac.mac, sizeof(mac.mac));
 	}
@@ -576,7 +551,7 @@
 	_gxio_mpipe_link_name_t name;
 	int rv;
 
-	if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
 		return GXIO_ERR_NO_DEVICE;
 
 	rv = gxio_mpipe_link_open_aux(context, name, flags);
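
The local helper deleted above returned 0 when the name did not fit; the kernel-wide strscpy() (available via the <linux/string.h> include added at the top of this file) returns the number of characters copied, excluding the trailing NUL, or -E2BIG on truncation, hence the switch from == 0 to < 0. The caller pattern, assuming the in-tree strscpy():

	ssize_t n = strscpy(name.name, link_name, sizeof(name.name));
	if (n < 0)	/* -E2BIG: link_name did not fit */
		return GXIO_ERR_NO_DEVICE;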
diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h
index 9e5ce0d..b66a693 100644
--- a/arch/tile/include/asm/word-at-a-time.h
+++ b/arch/tile/include/asm/word-at-a-time.h
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
 				     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
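
The little-endian zero_bytemask() above turns the 0x01-per-zero-byte value produced by has_zero() into a mask covering everything up to and including the low bit of the first zero byte. A self-contained user-space check of that arithmetic, assuming a 64-bit little-endian host (has_zero_mask() below is a scalar stand-in for the tile SIMD compare, not kernel code):

	#include <stdio.h>

	#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)

	/* scalar stand-in for has_zero(): 0x01 in each byte of v that is zero */
	static unsigned long has_zero_mask(unsigned long v)
	{
		unsigned long m = 0;
		for (int i = 0; i < 8; i++)
			if (!((v >> (8 * i)) & 0xff))
				m |= 1ul << (8 * i);
		return m;
	}

	int main(void)
	{
		/* bytes, LSB first: 'a' 'b' 'c' '\0' then garbage */
		unsigned long w = 0x5858585800636261ul;
		unsigned long mask = has_zero_mask(w);	/* 0x01000000 */

		/* keeps "abc" and the NUL, clears the garbage: 0x636261 */
		printf("%#lx\n", w & zero_bytemask(mask));
		return 0;
	}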
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
index f0da5a2..9f1e05e 100644
--- a/arch/tile/kernel/usb.c
+++ b/arch/tile/kernel/usb.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/usb/tilegx.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 149ec55..904f3eb 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -25,4 +25,5 @@
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1fc7a28..256c45b 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -62,4 +62,5 @@
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 328c835..96d058a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1308,6 +1308,7 @@
 config X86_PAE
 	bool "PAE (Physical Address Extension) Support"
 	depends on X86_32 && !HIGHMEM4G
+	select SWIOTLB
 	---help---
 	  PAE is required for NX support, and furthermore enables
 	  larger swapspace support for non-overcommit purposes. It
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 80a0e43..bacaa13 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -554,6 +554,11 @@
 {
 	const char *feature_name;
 
+	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
+		pr_info("AVX or AES-NI instructions are not detected.\n");
+		return -ENODEV;
+	}
+
 	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
 		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d303318..055a01d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1128,7 +1128,18 @@
 
 /* Runs on exception stack */
 ENTRY(nmi)
+	/*
+	 * Fix up the exception frame if we're on Xen.
+	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+	 * one value to the stack on native, so it may clobber the rdx
+	 * scratch slot, but it won't clobber any of the important
+	 * slots past it.
+	 *
+	 * Xen is a different story, because the Xen frame itself overlaps
+	 * the "NMI executing" variable.
+	 */
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
+
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.
@@ -1179,9 +1190,12 @@
 	 * we don't want to enable interrupts, because then we'll end
 	 * up in an awkward situation in which IRQs are on but NMIs
 	 * are off.
+	 *
+	 * We also must not push anything to the stack before switching
+	 * stacks lest we corrupt the "NMI executing" variable.
 	 */
 
-	SWAPGS
+	SWAPGS_UNSAFE_STACK
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index e6cf2ad..9727b3b 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -193,7 +193,7 @@
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_HWP		( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY	( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY	( 7*32+ 11) /* Intel HWP_NOTIFY */
 #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
 #define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index ab5f1d4..ae68be9 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -86,6 +86,7 @@
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 					u32 type, u64 attribute);
 
+#ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset.  __memset function is present
 * only in the kernel binary.  Since the EFI stub is linked into a separate binary, it
@@ -95,6 +96,7 @@
 #undef memcpy
 #undef memset
 #undef memmove
+#endif
 
 #endif /* CONFIG_X86_32 */
 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2beee03..3a36ee7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1226,10 +1226,8 @@
 
 int kvm_is_in_guest(void);
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem);
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem);
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index b98b471..b8c14bb 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -141,6 +141,8 @@
 #define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)
 
+#define MSR_PEBS_FRONTEND		0x000003f7
+
 #define MSR_IA32_POWER_CTL		0x000001fc
 
 #define MSR_IA32_MC0_CTL		0x00000400
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 655e07a..67f0823 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -41,6 +41,7 @@
 
 #define PVCLOCK_TSC_STABLE_BIT	(1 << 0)
 #define PVCLOCK_GUEST_STOPPED	(1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
 #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 83aea80..4c20dd3 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -336,10 +336,10 @@
 	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
-	return _hypercall2(int, memory_op, cmd, arg);
+	return _hypercall2(long, memory_op, cmd, arg);
 }
 
 static inline int
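
Widening the return type matters because several XENMEM_* commands return a quantity (a frame number or page count) rather than a status, and on large machines those values can exceed INT_MAX; the xen/setup.c hunk further below changes the matching caller's ret from int to long for the same reason. A sketch of the pattern, based on xen_get_max_pages():

	long ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
	if (ret > 0)
		max_pages = ret;	/* full value, no truncation through int */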
diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h
index b0ae1c4..217909b 100644
--- a/arch/x86/include/uapi/asm/bitsperlong.h
+++ b/arch/x86/include/uapi/asm/bitsperlong.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_X86_BITSPERLONG_H
 #define __ASM_X86_BITSPERLONG_H
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
 # define __BITS_PER_LONG 64
 #else
 # define __BITS_PER_LONG 32
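
The added __ILP32__ test covers the x32 ABI, which runs on the 64-bit instruction set (so __x86_64__ is defined) but keeps 32-bit longs and pointers; without it, x32 userspace would see __BITS_PER_LONG == 64. A compile-time check of that assumption, buildable with gcc -mx32 where supported:

	#if defined(__x86_64__) && defined(__ILP32__)
	_Static_assert(sizeof(long) == 4, "x32: 64-bit ISA, 32-bit long");
	#endif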
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 381c8b9..20e242e 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -34,11 +34,10 @@
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
 
 void hyperv_vector_handler(struct pt_regs *regs)
 {
@@ -96,8 +95,8 @@
 	hv_crash_handler = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif
 
+#ifdef CONFIG_KEXEC_CORE
 static void hv_machine_shutdown(void)
 {
 	if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@
 		hv_crash_handler(regs);
 	native_machine_crash_shutdown(regs);
 }
-
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */
 
 static uint32_t  __init ms_hyperv_platform(void)
 {
@@ -186,8 +186,10 @@
 	no_timer_check = 1;
 #endif
 
+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
 	machine_ops.shutdown = hv_machine_shutdown;
 	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
 	mark_tsc_unstable("running on Hyper-V");
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 5edf6d8..165be83 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -47,6 +47,7 @@
 	EXTRA_REG_RSP_1 = 1,	/* offcore_response_1 */
 	EXTRA_REG_LBR   = 2,	/* lbr_select */
 	EXTRA_REG_LDLAT = 3,	/* ld_lat_threshold */
+	EXTRA_REG_FE    = 4,    /* fe_* */
 
 	EXTRA_REG_MAX		/* number of entries needed */
 };
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3fefebf..f63360b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -205,6 +205,11 @@
 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+	/*
+	 * Note: the low 8 bits of the eventsel code do not form a contiguous
+	 * field; some bits trigger #GP and are masked out.
+	 */
+	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 	EVENT_EXTRA_END
 };
 
@@ -250,7 +255,7 @@
 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
-	INTEL_EVENT_CONSTRAINT(0xa3, 0x4),	/* CYCLE_ACTIVITY.* */
+	INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 	EVENT_CONSTRAINT_END
 };
 
@@ -2891,6 +2896,8 @@
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 
+PMU_FORMAT_ATTR(frontend, "config1:0-23");
+
 static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_event.attr,
 	&format_attr_umask.attr,
@@ -2907,6 +2914,11 @@
 	NULL,
 };
 
+static struct attribute *skl_format_attr[] = {
+	&format_attr_frontend.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
 	.name			= "core",
 	.handle_irq		= x86_pmu_handle_irq,
@@ -3516,7 +3528,8 @@
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.cpu_events = hsw_events_attrs;
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  skl_format_attr);
 		WARN_ON(!x86_pmu.format_attrs);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		pr_cont("Skylake events, ");
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index 086b12e..f32ac13 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -10,12 +10,12 @@
 	PERF_MSR_EVENT_MAX,
 };
 
-bool test_aperfmperf(int idx)
+static bool test_aperfmperf(int idx)
 {
 	return boot_cpu_has(X86_FEATURE_APERFMPERF);
 }
 
-bool test_intel(int idx)
+static bool test_intel(int idx)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 	    boot_cpu_data.x86 != 6)
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 3d423a1..608fb26 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -37,7 +37,7 @@
 		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
 		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
 		{ X86_FEATURE_HWP,		CR_EAX, 7, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_NOITFY,	CR_EAX, 8, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_NOTIFY,	CR_EAX, 8, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_ACT_WINDOW,	CR_EAX, 9, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_EPP,		CR_EAX,10, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_PKG_REQ,	CR_EAX,11, 0x00000006, 0 },
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e068d66..74ca2fe 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -185,10 +185,9 @@
 }
 
 #ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
-				unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
 {
-	int *nr_ranges = arg;
+	unsigned int *nr_ranges = arg;
 
 	(*nr_ranges)++;
 	return 0;
@@ -214,7 +213,7 @@
 
 	ced->image = image;
 
-	walk_system_ram_range(0, -1, &nr_ranges,
+	walk_system_ram_res(0, -1, &nr_ranges,
 				get_nr_ram_ranges_callback);
 
 	ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f68e48f..c2130ae 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
 #include <asm/timer.h>
 #include <asm/special_insns.h>
 
-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+     ".global _paravirt_nop\n"
+     "_paravirt_nop:\n\t"
+     "ret\n\t"
+     ".size _paravirt_nop, . - _paravirt_nop\n\t"
+     ".type _paravirt_nop, @function\n\t"
+     ".popsection");
 
 /* identity function, which can be inlined */
 u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6d0e62a..39e585a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -506,3 +506,58 @@
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long start, bottom, top, sp, fp, ip;
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	start = (unsigned long)task_stack_page(p);
+	if (!start)
+		return 0;
+
+	/*
+	 * Layout of the stack page:
+	 *
+	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+	 * PADDING
+	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+	 * stack
+	 * ----------- bottom = start + sizeof(thread_info)
+	 * thread_info
+	 * ----------- start
+	 *
+	 * The task's stack pointer points at the location where the
+	 * framepointer is stored. The data on the stack is:
+	 * ... IP FP ... IP FP
+	 *
+	 * We need to read FP and IP, so we need to adjust the upper
+	 * bound by another unsigned long.
+	 */
+	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+	top -= 2 * sizeof(unsigned long);
+	bottom = start + sizeof(struct thread_info);
+
+	sp = READ_ONCE(p->thread.sp);
+	if (sp < bottom || sp > top)
+		return 0;
+
+	fp = READ_ONCE(*(unsigned long *)sp);
+	do {
+		if (fp < bottom || fp > top)
+			return 0;
+		ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+		if (!in_sched_functions(ip))
+			return ip;
+		fp = READ_ONCE(*(unsigned long *)fp);
+	} while (count++ < 16 && p->state != TASK_RUNNING);
+	return 0;
+}
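
Plugging in typical x86_64 values makes the bounds concrete (these constants are configuration-dependent, so treat this as an illustration): with THREAD_SIZE = 16 KiB, TOP_OF_KERNEL_STACK_PADDING = 0 and 8-byte longs,

	top    = start + 16384 - 0 - 2 * 8 = start + 16368
	bottom = start + sizeof(struct thread_info)

and a saved sp or fp is only followed while it stays inside [bottom, top]; the walk also gives up after 16 frames or once the task becomes runnable.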
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c13df2c..737527b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -324,31 +324,3 @@
 
 	return prev_p;
 }
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long bp, sp, ip;
-	unsigned long stack_page;
-	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-	stack_page = (unsigned long)task_stack_page(p);
-	sp = p->thread.sp;
-	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-		return 0;
-	/* include/asm-i386/system.h:switch_to() pushes bp last. */
-	bp = *(unsigned long *) sp;
-	do {
-		if (bp < stack_page || bp > top_ebp+stack_page)
-			return 0;
-		ip = *(unsigned long *) (bp+4);
-		if (!in_sched_functions(ip))
-			return ip;
-		bp = *(unsigned long *) bp;
-	} while (count++ < 16);
-	return 0;
-}
-
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3c1bbcf..b35921a 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -499,30 +499,6 @@
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long stack;
-	u64 fp, ip;
-	int count = 0;
-
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-		return 0;
-	fp = *(u64 *)(p->thread.sp);
-	do {
-		if (fp < (unsigned long)stack ||
-		    fp >= (unsigned long)stack+THREAD_SIZE)
-			return 0;
-		ip = *(u64 *)(fp+8);
-		if (!in_sched_functions(ip))
-			return ip;
-		fp = *(u64 *)fp;
-	} while (count++ < 16);
-	return 0;
-}
-
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b372a75..9da95b9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2418,7 +2418,7 @@
 	u64 val, cr0, cr4;
 	u32 base3;
 	u16 selector;
-	int i;
+	int i, r;
 
 	for (i = 0; i < 16; i++)
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
+	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(ctxt, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return X86EMUL_CONTINUE;
 }
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 94b7d15..2f9ed1f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -514,7 +514,7 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (svm->vmcb->control.next_rip != 0) {
-		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
 		svm->next_rip = svm->vmcb->control.next_rip;
 	}
 
@@ -866,64 +866,6 @@
 	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
-#define MTRR_TYPE_UC_MINUS	7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
-	/*
-	 * WT and WP aren't always available in the host PAT.  Treat
-	 * them as UC and UC- respectively.  Everything else should be
-	 * there.
-	 */
-	switch (mtrr)
-	{
-	case MTRR_TYPE_WRTHROUGH:
-		return MTRR_TYPE_UNCACHABLE;
-	case MTRR_TYPE_WRPROT:
-		return MTRR_TYPE_UC_MINUS;
-	default:
-		BUG();
-	}
-}
-
-static void build_mtrr2protval(void)
-{
-	int i;
-	u64 pat;
-
-	for (i = 0; i < 8; i++)
-		mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
-	/* Ignore the invalid MTRR types.  */
-	mtrr2protval[2] = 0;
-	mtrr2protval[3] = 0;
-
-	/*
-	 * Use host PAT value to figure out the mapping from guest MTRR
-	 * values to nested page table PAT/PCD/PWT values.  We do not
-	 * want to change the host PAT value every time we enter the
-	 * guest.
-	 */
-	rdmsrl(MSR_IA32_CR_PAT, pat);
-	for (i = 0; i < 8; i++) {
-		u8 mtrr = pat >> (8 * i);
-
-		if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
-			mtrr2protval[mtrr] = __cm_idx2pte(i);
-	}
-
-	for (i = 0; i < 8; i++) {
-		if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
-			u8 fallback = fallback_mtrr_type(i);
-			mtrr2protval[i] = mtrr2protval[fallback];
-			BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
-		}
-	}
-}
-
 static __init int svm_hardware_setup(void)
 {
 	int cpu;
@@ -990,7 +932,6 @@
 	} else
 		kvm_disable_tdp();
 
-	build_mtrr2protval();
 	return 0;
 
 err:
@@ -1145,43 +1086,6 @@
 	return target_tsc - tsc;
 }
 
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-
-	/* Unlike Intel, AMD takes the guest's CR0.CD into account.
-	 *
-	 * AMD does not have IPAT.  To emulate it for the case of guests
-	 * with no assigned devices, just set everything to WB.  If guests
-	 * have assigned devices, however, we cannot force WB for RAM
-	 * pages only, so use the guest PAT directly.
-	 */
-	if (!kvm_arch_has_assigned_device(vcpu->kvm))
-		*g_pat = 0x0606060606060606;
-	else
-		*g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-	u8 mtrr;
-
-	/*
-	 * 1. MMIO: trust guest MTRR, so same as item 3.
-	 * 2. No passthrough: always map as WB, and force guest PAT to WB as well
-	 * 3. Passthrough: can't guarantee the result, try to trust guest.
-	 */
-	if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
-		return 0;
-
-	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
-	    kvm_read_cr0(vcpu) & X86_CR0_CD)
-		return _PAGE_NOCACHE;
-
-	mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
-	return mtrr2protval[mtrr];
-}
-
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1278,7 +1182,6 @@
 		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
 		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = svm->vcpu.arch.pat;
-		svm_set_guest_pat(svm, &save->g_pat);
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
@@ -1673,10 +1576,13 @@
 
 	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-
-	/* These are emulated via page tables.  */
-	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+	/*
+	 * Re-enable caching here because the QEMU BIOS
+	 * does not do it, which results in some delay
+	 * at reboot.
+	 */
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
 	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
@@ -3351,16 +3257,6 @@
 	case MSR_VM_IGNNE:
 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
 		break;
-	case MSR_IA32_CR_PAT:
-		if (npt_enabled) {
-			if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-				return 1;
-			vcpu->arch.pat = data;
-			svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
-			mark_dirty(svm->vmcb, VMCB_NPT);
-			break;
-		}
-		/* fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
@@ -4195,6 +4091,11 @@
 	return true;
 }
 
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+	return 0;
+}
+
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6407674..6a8bc64 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4105,17 +4105,13 @@
 static int alloc_apic_access_page(struct kvm *kvm)
 {
 	struct page *page;
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	mutex_lock(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page_done)
 		goto out;
-	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				    APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
 	if (r)
 		goto out;
 
@@ -4140,17 +4136,12 @@
 {
 	/* Called with kvm->slots_lock held. */
 
-	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
 	BUG_ON(kvm->arch.ept_identity_pagetable_done);
 
-	kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-	kvm_userspace_mem.flags = 0;
-	kvm_userspace_mem.guest_phys_addr =
-		kvm->arch.ept_identity_map_addr;
-	kvm_userspace_mem.memory_size = PAGE_SIZE;
-	r = __x86_set_memory_region(kvm, &kvm_userspace_mem);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 
 	return r;
 }
@@ -4949,14 +4940,9 @@
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 {
 	int ret;
-	struct kvm_userspace_memory_region tss_mem = {
-		.slot = TSS_PRIVATE_MEMSLOT,
-		.guest_phys_addr = addr,
-		.memory_size = PAGE_SIZE * 3,
-		.flags = 0,
-	};
 
-	ret = x86_set_memory_region(kvm, &tss_mem);
+	ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+				    PAGE_SIZE * 3);
 	if (ret)
 		return ret;
 	kvm->arch.tss_addr = addr;
@@ -8617,17 +8603,22 @@
 	u64 ipat = 0;
 
 	/* For VT-d and EPT combination
-	 * 1. MMIO: guest may want to apply WC, trust it.
+	 * 1. MMIO: always map as UC
 	 * 2. EPT with VT-d:
 	 *   a. VT-d without snooping control feature: can't guarantee the
-	 *	result, try to trust guest.  So the same as item 1.
+	 *	result, try to trust guest.
 	 *   b. VT-d with snooping control feature: snooping control feature of
 	 *	VT-d engine can guarantee the cache correctness. Just set it
 	 *	to WB to keep consistent with host. So the same as item 3.
 	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
 	 *    consistent with host MTRR
 	 */
-	if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+	if (is_mmio) {
+		cache = MTRR_TYPE_UNCACHABLE;
+		goto exit;
+	}
+
+	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
 		ipat = VMX_EPT_IPAT_BIT;
 		cache = MTRR_TYPE_WRBACK;
 		goto exit;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 991466b..9a9a198 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1708,8 +1708,6 @@
 		vcpu->pvclock_set_guest_stopped_request = false;
 	}
 
-	pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
 	/* If the host uses TSC clocksource, then it is stable */
 	if (use_master_clock)
 		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2007,8 +2005,6 @@
 					&vcpu->requests);
 
 			ka->boot_vcpu_runs_old_kvmclock = tmp;
-
-			ka->kvmclock_offset = -get_kernel_ns();
 		}
 
 		vcpu->arch.time = data;
@@ -6457,6 +6453,12 @@
 	return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -6465,8 +6467,7 @@
 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	for (;;) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		    !vcpu->arch.apf.halted)
+		if (kvm_vcpu_running(vcpu))
 			r = vcpu_enter_guest(vcpu);
 		else
 			r = vcpu_block(kvm, vcpu);
@@ -7478,34 +7479,66 @@
 	kvm_free_pit(kvm);
 }
 
-int __x86_set_memory_region(struct kvm *kvm,
-			    const struct kvm_userspace_memory_region *mem)
+int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int i, r;
+	unsigned long hva;
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *slot, old;
 
 	/* Called with kvm->slots_lock held.  */
-	BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM);
+	if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
+		return -EINVAL;
 
+	slot = id_to_memslot(slots, id);
+	if (size) {
+		if (WARN_ON(slot->npages))
+			return -EEXIST;
+
+		/*
+		 * MAP_SHARED to prevent internal slot pages from being moved
+		 * by fork()/COW.
+		 */
+		hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_ANONYMOUS, 0);
+		if (IS_ERR((void *)hva))
+			return PTR_ERR((void *)hva);
+	} else {
+		if (!slot->npages)
+			return 0;
+
+		hva = 0;
+	}
+
+	old = *slot;
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
-		struct kvm_userspace_memory_region m = *mem;
+		struct kvm_userspace_memory_region m;
 
-		m.slot |= i << 16;
+		m.slot = id | (i << 16);
+		m.flags = 0;
+		m.guest_phys_addr = gpa;
+		m.userspace_addr = hva;
+		m.memory_size = size;
 		r = __kvm_set_memory_region(kvm, &m);
 		if (r < 0)
 			return r;
 	}
 
+	if (!size) {
+		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
+		WARN_ON(r < 0);
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
 
-int x86_set_memory_region(struct kvm *kvm,
-			  const struct kvm_userspace_memory_region *mem)
+int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 {
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __x86_set_memory_region(kvm, mem);
+	r = __x86_set_memory_region(kvm, id, gpa, size);
 	mutex_unlock(&kvm->slots_lock);
 
 	return r;
@@ -7520,16 +7553,9 @@
		 * unless the memory map has changed due to process exit
 		 * or fd copying.
 		 */
-		struct kvm_userspace_memory_region mem;
-		memset(&mem, 0, sizeof(mem));
-		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
-
-		mem.slot = TSS_PRIVATE_MEMSLOT;
-		x86_set_memory_region(kvm, &mem);
+		x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0);
+		x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
 	}
 	kvm_iommu_unmap_guest(kvm);
 	kfree(kvm->arch.vpic);
@@ -7632,27 +7658,6 @@
 				const struct kvm_userspace_memory_region *mem,
 				enum kvm_mr_change change)
 {
-	/*
-	 * Only private memory slots need to be mapped here since
-	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
-	 */
-	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
-		unsigned long userspace_addr;
-
-		/*
-		 * MAP_SHARED to prevent internal slot pages from being moved
-		 * by fork()/COW.
-		 */
-		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
-					 PROT_READ | PROT_WRITE,
-					 MAP_SHARED | MAP_ANONYMOUS, 0);
-
-		if (IS_ERR((void *)userspace_addr))
-			return PTR_ERR((void *)userspace_addr);
-
-		memslot->userspace_addr = userspace_addr;
-	}
-
 	return 0;
 }
 
@@ -7714,17 +7719,6 @@
 {
 	int nr_mmu_pages = 0;
 
-	if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) {
-		int ret;
-
-		ret = vm_munmap(old->userspace_addr,
-				old->npages * PAGE_SIZE);
-		if (ret < 0)
-			printk(KERN_WARNING
-			       "kvm_vm_ioctl_set_memory_region: "
-			       "failed to munmap memory\n");
-	}
-
 	if (!kvm->arch.n_requested_mmu_pages)
 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
@@ -7773,19 +7767,36 @@
 	kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return true;
+
+	if (kvm_apic_has_events(vcpu))
+		return true;
+
+	if (vcpu->arch.pv.pv_unhalted)
+		return true;
+
+	if (atomic_read(&vcpu->arch.nmi_queued))
+		return true;
+
+	if (test_bit(KVM_REQ_SMI, &vcpu->requests))
+		return true;
+
+	if (kvm_arch_interrupt_allowed(vcpu) &&
+	    kvm_cpu_has_interrupt(vcpu))
+		return true;
+
+	return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
 		kvm_x86_ops->check_nested_events(vcpu, false);
 
-	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-		!vcpu->arch.apf.halted)
-		|| !list_empty_careful(&vcpu->async_pf.done)
-		|| kvm_apic_has_events(vcpu)
-		|| vcpu->arch.pv.pv_unhalted
-		|| atomic_read(&vcpu->arch.nmi_queued) ||
-		(kvm_arch_interrupt_allowed(vcpu) &&
-		 kvm_cpu_has_interrupt(vcpu));
+	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 30564e2..df48430 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@
 	 * has been zapped already via cleanup_highmem().
 	 */
 	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
 	rodata_test();
 
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 70efcd0..7599197 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1109,7 +1109,7 @@
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
 		prog->bpf_func = (void *)image;
-		prog->jited = true;
+		prog->jited = 1;
 	}
 out:
 	kfree(addrs);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1db84c0..6a28ded 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -705,6 +705,70 @@
 }
 
 /*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+	/* Initial call */
+	if (!entry)
+		return memmap.map_end - memmap.desc_size;
+
+	entry -= memmap.desc_size;
+	if (entry < memmap.map)
+		return NULL;
+
+	return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+		/*
+		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+		 * config table feature requires us to map all entries
+		 * in the same order as they appear in the EFI memory
+		 * map. That is to say, entry N must have a lower
+		 * virtual address than entry N+1. This is because the
+		 * firmware toolchain leaves relative references in
+		 * the code/data sections, which are split and become
+		 * separate EFI memory regions. Mapping things
+		 * out-of-order leads to the firmware accessing
+		 * unmapped addresses.
+		 *
+		 * Since we need to map things this way whether or not
+		 * the kernel actually makes use of
+		 * EFI_PROPERTIES_TABLE, let's just switch to this
+		 * scheme by default for 64-bit.
+		 */
+		return efi_map_next_entry_reverse(entry);
+	}
+
+	/* Initial call */
+	if (!entry)
+		return memmap.map;
+
+	entry += memmap.desc_size;
+	if (entry >= memmap.map_end)
+		return NULL;
+
+	return entry;
+}
+
+/*
  * Map the efi memory ranges of the runtime services and update new_mmap with
  * virtual addresses.
  */
@@ -714,7 +778,8 @@
 	unsigned long left = 0;
 	efi_memory_desc_t *md;
 
-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+	p = NULL;
+	while ((p = efi_map_next_entry(p))) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
 #ifdef CONFIG_X86_64
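
The reverse iterator has two termination conditions worth seeing in isolation: a NULL argument starts the walk at the last descriptor, and stepping below the map base ends it. A minimal user-space model (the map layout and sizes are invented for the demo, not EFI data):

	#include <stdio.h>

	static char map[4 * 24];	/* pretend: 4 descriptors, 24 bytes each */
	static const long desc_size = 24;
	#define map_end (map + sizeof(map))

	static void *next_entry_reverse(void *entry)
	{
		if (!entry)		/* initial call */
			return map_end - desc_size;
		entry = (char *)entry - desc_size;
		return entry < (void *)map ? NULL : entry;
	}

	int main(void)
	{
		for (void *p = NULL; (p = next_entry_reverse(p)); )
			printf("descriptor at offset %ld\n", (long)((char *)p - map));
		return 0;	/* prints offsets 72, 48, 24, 0 */
	}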
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 30d12af..993b7a7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@
 		/* Fast syscall setup is all done in hypercalls, so
 		   these are all ignored.  Stub them out here to stop
 		   Xen console noise. */
+		break;
 
 	default:
 		if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@
 	.notifier_call	= xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+	native_machine_shutdown();
+	if (kexec_in_progress)
+		xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+	native_machine_crash_shutdown(regs);
+	xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
 	if (xen_pv_domain())
@@ -1826,6 +1846,10 @@
 	x86_init.irqs.intr_init = xen_init_IRQ;
 	xen_hvm_init_time_ops();
 	xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+	machine_ops.shutdown = xen_hvm_shutdown;
+	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 #endif
 
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index bfc08b1..660b3cf 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -112,6 +112,15 @@
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
 	BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@
 	else
 		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
 			virt_to_mfn(p2m_top_mfn);
-	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
 	HYPERVISOR_shared_info->arch.p2m_generation = 0;
 	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
 	HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@
 	static struct vm_struct vm;
 	unsigned long p2m_limit;
 
+	xen_p2m_last_pfn = xen_max_p2m_pfn;
+
 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
 	vm.flags = VM_ALLOC;
 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@
 			free_p2m_page(p2m);
 	}
 
+	/* Expanded the p2m? */
+	if (pfn > xen_p2m_last_pfn) {
+		xen_p2m_last_pfn = pfn;
+		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+	}
+
 	return true;
 }
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f5ef674..1c30e4a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -548,7 +548,7 @@
 {
 	unsigned long max_pages, limit;
 	domid_t domid = DOMID_SELF;
-	int ret;
+	long ret;
 
 	limit = xen_get_pages_limit();
 	max_pages = limit;
@@ -798,7 +798,7 @@
 		xen_ignore_unusable();
 
 	/* Make sure the Xen-supplied memory map is well-ordered. */
-	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
 			  &xen_e820_map_entries);
 
 	max_pages = xen_get_max_pages();
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 63c223d..b56855a 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -28,4 +28,5 @@
 generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 1e28ddb..8764c24 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,7 +31,8 @@
 	return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+			    const struct cpumask *online_mask)
 {
 	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
 	cpumask_var_t cpus;
@@ -41,7 +42,7 @@
 
 	cpumask_clear(cpus);
 	nr_cpus = nr_uniq_cpus = 0;
-	for_each_online_cpu(i) {
+	for_each_cpu(i, online_mask) {
 		nr_cpus++;
 		first_sibling = get_first_sibling(i);
 		if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@
 
 	queue = 0;
 	for_each_possible_cpu(i) {
-		if (!cpu_online(i)) {
+		if (!cpumask_test_cpu(i, online_mask)) {
 			map[i] = 0;
 			continue;
 		}
@@ -95,7 +96,7 @@
 	if (!map)
 		return NULL;
 
-	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
 		return map;
 
 	kfree(map);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 279c5d6..788fffd 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -229,8 +229,6 @@
 	unsigned int i, first = 1;
 	ssize_t ret = 0;
 
-	blk_mq_disable_hotplug();
-
 	for_each_cpu(i, hctx->cpumask) {
 		if (first)
 			ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@
 		first = 0;
 	}
 
-	blk_mq_enable_hotplug();
-
 	ret += sprintf(ret + page, "\n");
 	return ret;
 }
@@ -343,7 +339,7 @@
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return;
 
 	hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@
 	struct blk_mq_ctx *ctx;
 	int i, ret;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return 0;
 
 	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@
 	struct blk_mq_ctx *ctx;
 	int i, j;
 
+	blk_mq_disable_hotplug();
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +393,9 @@
 	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
+
+	q->mq_sysfs_init_done = false;
+	blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@
 	struct blk_mq_hw_ctx *hctx;
 	int ret, i;
 
+	blk_mq_disable_hotplug();
+
 	blk_mq_sysfs_init(q);
 
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->flags |= BLK_MQ_F_SYSFS_UP;
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
 			break;
 	}
 
-	if (ret) {
+	if (ret)
 		blk_mq_unregister_disk(disk);
-		return ret;
-	}
+	else
+		q->mq_sysfs_init_done = true;
+out:
+	blk_mq_enable_hotplug();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +458,9 @@
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	if (!q->mq_sysfs_init_done)
+		return;
+
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +470,9 @@
 	struct blk_mq_hw_ctx *hctx;
 	int i, ret = 0;
 
+	if (!q->mq_sysfs_init_done)
+		return ret;
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9115c6d..ed96474 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -471,17 +471,30 @@
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
-	struct blk_mq_tags *tags = hctx->tags;
+	struct blk_mq_hw_ctx *hctx;
+	int i;
 
-	if (tags->nr_reserved_tags)
-		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
-	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
-			false);
+
+	queue_for_each_hw_ctx(q, hctx, i) {
+		struct blk_mq_tags *tags = hctx->tags;
+
+		/*
+		 * If no software queues are currently mapped to this
+		 * hardware queue, there's nothing to check.
+		 */
+		if (!blk_mq_hw_queue_mapped(hctx))
+			continue;
+
+		if (tags->nr_reserved_tags)
+			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+		      false);
+	}
+
 }
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
 {
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 9eb2cf4..d468a79 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -58,6 +58,8 @@
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+		void *priv);
 
 enum {
 	BLK_MQ_TAG_CACHE_MIN	= 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f2d67b4..7785ae96 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -393,14 +393,16 @@
  *	Ends all I/O on a request. It does not handle partial completions.
  *	The actual completion happens out-of-order, through a IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
 	struct request_queue *q = rq->q;
 
 	if (unlikely(blk_should_fake_timeout(q)))
 		return;
-	if (!blk_mark_rq_complete(rq))
+	if (!blk_mark_rq_complete(rq)) {
+		rq->errors = error;
 		__blk_mq_complete_request(rq);
+	}
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
@@ -616,10 +618,8 @@
 		 * If a request wasn't started before the queue was
 		 * marked dying, kill it here or it'll go unnoticed.
 		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_complete_request(rq);
-		}
+		if (unlikely(blk_queue_dying(rq->q)))
+			blk_mq_complete_request(rq, -EIO);
 		return;
 	}
 	if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@
 		.next		= 0,
 		.next_set	= 0,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
-		/*
-		 * If not software queues are currently mapped to this
-		 * hardware queue, there's nothing to check
-		 */
-		if (!blk_mq_hw_queue_mapped(hctx))
-			continue;
-
-		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
-	}
+	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
 	if (data.next_set) {
 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
 		mod_timer(&q->timeout, data.next);
 	} else {
+		struct blk_mq_hw_ctx *hctx;
+
 		queue_for_each_hw_ctx(q, hctx, i) {
 			/* the hctx may be unmapped, so check it here */
 			if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@
 	}
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+			       const struct cpumask *online_mask)
 {
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
 
+	/*
+	 * Avoid others reading incomplete hctx->cpumask through sysfs
+	 */
+	mutex_lock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
 		hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@
 	 */
 	queue_for_each_ctx(q, ctx, i) {
 		/* If the cpu isn't online, the cpu is mapped to the first hctx */
-		if (!cpu_online(i))
+		if (!cpumask_test_cpu(i, online_mask))
 			continue;
 
 		hctx = q->mq_ops->map_queue(q, i);
 		cpumask_set_cpu(i, hctx->cpumask);
-		cpumask_set_cpu(i, hctx->tags->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
 
+	mutex_unlock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_ctxmap *map = &hctx->ctx_map;
 
@@ -1851,6 +1850,14 @@
 		hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
+
+	queue_for_each_ctx(q, ctx, i) {
+		if (!cpumask_test_cpu(i, online_mask))
+			continue;
+
+		hctx = q->mq_ops->map_queue(q, i);
+		cpumask_set_cpu(i, hctx->tags->cpumask);
+	}
 }
 
 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@
 		kfree(hctx);
 	}
 
+	kfree(q->mq_map);
+	q->mq_map = NULL;
+
 	kfree(q->queue_hw_ctx);
 
 	/* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@
 	if (blk_mq_init_hw_queues(q, set))
 		goto err_hctxs;
 
+	get_online_cpus();
 	mutex_lock(&all_q_mutex);
+
 	list_add_tail(&q->all_q_node, &all_q_list);
-	mutex_unlock(&all_q_mutex);
-
 	blk_mq_add_queue_tag_set(set, q);
+	blk_mq_map_swqueue(q, cpu_online_mask);
 
-	blk_mq_map_swqueue(q);
+	mutex_unlock(&all_q_mutex);
+	put_online_cpus();
 
 	return q;
 
@@ -2057,30 +2069,27 @@
 {
 	struct blk_mq_tag_set	*set = q->tag_set;
 
+	mutex_lock(&all_q_mutex);
+	list_del_init(&q->all_q_node);
+	mutex_unlock(&all_q_mutex);
+
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 	blk_mq_free_hw_queues(q, set);
 
 	percpu_ref_exit(&q->mq_usage_counter);
-
-	kfree(q->mq_map);
-
-	q->mq_map = NULL;
-
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+				const struct cpumask *online_mask)
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
 	blk_mq_sysfs_unregister(q);
 
-	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
 
 	/*
 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@
 	 * involves freeing and re-allocating memory; worth doing?)
 	 */
 
-	blk_mq_map_swqueue(q);
+	blk_mq_map_swqueue(q, online_mask);
 
 	blk_mq_sysfs_register(q);
 }
@@ -2097,16 +2106,43 @@
 				      unsigned long action, void *hcpu)
 {
 	struct request_queue *q;
+	int cpu = (unsigned long)hcpu;
+	/*
+	 * New online cpumask which is going to be set in this hotplug event.
+	 * Declare it static, since cpu-hotplug operations are invoked one at
+	 * a time and a dynamic allocation here could fail.
+	 */
+	static struct cpumask online_new;
 
 	/*
-	 * Before new mappings are established, hotadded cpu might already
-	 * start handling requests. This doesn't break anything as we map
-	 * offline CPUs to first hardware queue. We will re-init the queue
-	 * below to get optimal settings.
+	 * Before a hotadded cpu starts handling requests, new mappings must
+	 * be established.  Otherwise, requests in the hw queue might
+	 * never be dispatched.
+	 *
+	 * For example, suppose there is a single hw queue (hctx) and two
+	 * CPU queues (ctx0 for CPU0, and ctx1 for CPU1).
+	 *
+	 * Now CPU1 is just onlined and a request is inserted into
+	 * ctx1->rq_list; bit0 is set in the pending bitmap, as
+	 * ctx1->index_hw is still zero.
+	 *
+	 * And then while running the hw queue, flush_busy_ctxs() finds
+	 * bit0 set in the pending bitmap and tries to retrieve requests
+	 * from hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to
+	 * ctx0, so the request in ctx1->rq_list is ignored.
 	 */
-	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
-	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DEAD:
+	case CPU_UP_CANCELED:
+		cpumask_copy(&online_new, cpu_online_mask);
+		break;
+	case CPU_UP_PREPARE:
+		cpumask_copy(&online_new, cpu_online_mask);
+		cpumask_set_cpu(cpu, &online_new);
+		break;
+	default:
 		return NOTIFY_OK;
+	}
 
 	mutex_lock(&all_q_mutex);
 
@@ -2130,7 +2166,7 @@
 	}
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q);
+		blk_mq_queue_reinit(q, &online_new);
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_unfreeze_queue(q);
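
The core change in this file folds the completion status into
blk_mq_complete_request() itself, so drivers stop writing rq->errors
before completing; the timeout path and every converted driver below
pass the error directly. Under the new signature a driver completion
path reduces to something like this sketch (the function name is
hypothetical):

    /* Sketch: report the outcome through the completion call instead of
     * writing rq->errors first.
     */
    static void mydrv_finish_request(struct request *rq, bool failed)
    {
            blk_mq_complete_request(rq, failed ? -EIO : 0);
    }
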
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6a48c4c..f4fea79 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -51,7 +51,8 @@
  * CPU -> queue mappings
  */
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+				   const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 /*
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 8acb88603..9c1dc8d 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -544,7 +544,8 @@
 	struct crypto_alg *base = &alg->halg.base;
 
 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
-	    alg->halg.statesize > PAGE_SIZE / 8)
+	    alg->halg.statesize > PAGE_SIZE / 8 ||
+	    alg->halg.statesize == 0)
 		return -EINVAL;
 
 	base->cra_type = &crypto_ahash_type;
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6d88dd1..1970966 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -332,10 +332,6 @@
 		srlen = cert->raw_serial_size;
 		q = cert->raw_serial;
 	}
-	if (srlen > 1 && *q == 0) {
-		srlen--;
-		q++;
-	}
 
 	ret = -ENOMEM;
 	desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 09f37b5..4dde37c 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -61,6 +61,7 @@
 ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
 ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index f7731f2..591ea95 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -85,7 +85,7 @@
 /*
  * tbfadt - FADT parse/convert/validate
  */
-void acpi_tb_parse_fadt(u32 table_index);
+void acpi_tb_parse_fadt(void);
 
 void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length);
 
@@ -138,8 +138,6 @@
  */
 acpi_status acpi_tb_initialize_facs(void);
 
-u8 acpi_tb_tables_loaded(void);
-
 void
 acpi_tb_print_table_header(acpi_physical_address address,
 			   struct acpi_table_header *header);
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index faad911..10ce48e 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -71,7 +71,7 @@
 
 	/* ACPI tables must be present */
 
-	if (!acpi_tb_tables_loaded()) {
+	if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) {
 		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
 	}
 
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 455a070..a6454f4 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -298,7 +298,7 @@
  *
  * FUNCTION:    acpi_tb_parse_fadt
  *
- * PARAMETERS:  table_index         - Index for the FADT
+ * PARAMETERS:  None
  *
  * RETURN:      None
  *
@@ -307,7 +307,7 @@
  *
  ******************************************************************************/
 
-void acpi_tb_parse_fadt(u32 table_index)
+void acpi_tb_parse_fadt(void)
 {
 	u32 length;
 	struct acpi_table_header *table;
@@ -319,11 +319,11 @@
 	 * Get a local copy of the FADT and convert it to a common format
 	 * Map entire FADT, assumed to be smaller than one page.
 	 */
-	length = acpi_gbl_root_table_list.tables[table_index].length;
+	length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length;
 
 	table =
-	    acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index].
-			       address, length);
+	    acpi_os_map_memory(acpi_gbl_root_table_list.
+			       tables[acpi_gbl_fadt_index].address, length);
 	if (!table) {
 		return;
 	}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 4337990..d8ddef3 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -99,29 +99,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_tb_tables_loaded
- *
- * PARAMETERS:  None
- *
- * RETURN:      TRUE if required ACPI tables are loaded
- *
- * DESCRIPTION: Determine if the minimum required ACPI tables are present
- *              (FADT, FACS, DSDT)
- *
- ******************************************************************************/
-
-u8 acpi_tb_tables_loaded(void)
-{
-
-	if (acpi_gbl_root_table_list.current_table_count >= 4) {
-		return (TRUE);
-	}
-
-	return (FALSE);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_tb_check_dsdt_header
  *
  * PARAMETERS:  None
@@ -392,7 +369,8 @@
 		    ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
 				      tables[table_index].signature,
 				      ACPI_SIG_FADT)) {
-			acpi_tb_parse_fadt(table_index);
+			acpi_gbl_fadt_index = table_index;
+			acpi_tb_parse_fadt();
 		}
 
 next_table:
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a83..42c66b6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@
 		goto err_exit;
 
 	mutex_lock(&ec->mutex);
+	result = -ENODATA;
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
+			result = 0;
 			q->handler = acpi_ec_get_query_handler(handler);
 			ec_dbg_evt("Query(0x%02x) scheduled",
 				   q->handler->query_bit);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9b..c933675 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@
 
 	/* Interrupt Line values above 0xF are forbidden */
 	if (dev->irq > 0 && (dev->irq <= 0xF) &&
+	    acpi_isa_irq_available(dev->irq) &&
 	    (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
 		dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
 			 pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98..7c8408b 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@
 			    PIRQ_PENALTY_PCI_POSSIBLE;
 		}
 	}
-	/* Add a penalty for the SCI */
-	acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
 	return 0;
 }
 
@@ -553,6 +552,13 @@
 				irq = link->irq.possible[i];
 		}
 	}
+	if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+		printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+			    "Try pci=noacpi or acpi=off\n",
+			    acpi_device_name(link->device),
+			    acpi_device_bid(link->device));
+		return -ENODEV;
+	}
 
 	/* Attempt to enable the link device at this IRQ. */
 	if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@
 	}
 }
 
+bool acpi_isa_irq_available(int irq)
+{
+	return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+			    acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
 /*
  * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
 * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
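
acpi_isa_irq_available() above gives callers a way to reject an ISA IRQ
whose penalty already marks it unusable, before trying to route through
it (the pci_irq.c hunk uses exactly that). A hedged usage sketch;
my_claim_isa_irq() is made up for illustration:

    /* Sketch: probe-time check before falling back to a legacy ISA IRQ. */
    static int my_claim_isa_irq(int irq)
    {
            if (!acpi_isa_irq_available(irq))
                    return -EBUSY;  /* penalty says the IRQ is effectively taken */

            return 0;               /* safe to program the device with irq */
    }
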
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 65e6590..7d00f29 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -112,7 +112,8 @@
 
 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-   if (!entry) return -1;
+   if (!entry)
+      return -ENOMEM;
    entry->data = data;
    entry->next = NULL;
    if (que->next == NULL) 
@@ -1175,7 +1176,7 @@
         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
            if (vcc->vci < 32)
               printk("Drop control packets\n");
-	      goto out_free_desc;
+	   goto out_free_desc;
         }
 	skb_put(skb,len);  
         // pwang_test
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a..e9fd32e 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@
 
 			if (sibling == cpu) /* skip itself */
 				continue;
+
 			sib_cpu_ci = get_cpu_cacheinfo(sibling);
+			if (!sib_cpu_ci->info_list)
+				continue;
+
 			sib_leaf = sib_cpu_ci->info_list + index;
 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
 			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@
 
 static void free_cache_attributes(unsigned int cpu)
 {
+	if (!per_cpu_cacheinfo(cpu))
+		return;
+
 	cache_shared_cpu_map_remove(cpu);
 
 	kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@
 		break;
 	case CPU_DEAD:
 		cache_remove_dev(cpu);
-		if (per_cpu_cacheinfo(cpu))
-			free_cache_attributes(cpu);
+		free_cache_attributes(cpu);
 		break;
 	}
 	return notifier_from_errno(rc);
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 2a4154a..85e17ba 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -77,13 +77,16 @@
 				      dev_update_qos_constraint);
 
 	if (constraint_ns > 0) {
-		constraint_ns -= td->start_latency_ns;
+		constraint_ns -= td->save_state_latency_ns +
+				td->stop_latency_ns +
+				td->start_latency_ns +
+				td->restore_state_latency_ns;
 		if (constraint_ns == 0)
 			return false;
 	}
 	td->effective_constraint_ns = constraint_ns;
-	td->cached_stop_ok = constraint_ns > td->stop_latency_ns ||
-				constraint_ns == 0;
+	td->cached_stop_ok = constraint_ns >= 0;
+
 	/*
 	 * The children have been suspended already, so we don't need to take
 	 * their stop latencies into account here.
@@ -126,18 +129,6 @@
 
 	off_on_time_ns = genpd->power_off_latency_ns +
 				genpd->power_on_latency_ns;
-	/*
-	 * It doesn't make sense to remove power from the domain if saving
-	 * the state of all devices in it and the power off/power on operations
-	 * take too much time.
-	 *
-	 * All devices in this domain have been stopped already at this point.
-	 */
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		if (pdd->dev->driver)
-			off_on_time_ns +=
-				to_gpd_data(pdd)->td.save_state_latency_ns;
-	}
 
 	min_off_time_ns = -1;
 	/*
@@ -193,7 +184,6 @@
 		 * constraint_ns cannot be negative here, because the device has
 		 * been suspended.
 		 */
-		constraint_ns -= td->restore_state_latency_ns;
 		if (constraint_ns <= off_on_time_ns)
 			return false;
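
The governor now charges the full save/stop/start/restore round trip
against the QoS constraint in one place, rather than splitting the
latencies across two separate checks, and a device may be stopped as
long as the remainder is non-negative. With illustrative numbers: a
500 us constraint minus four 100 us phases leaves 100 us of headroom,
so stop is allowed. A sketch of the arithmetic (values are examples,
not real hardware latencies):

    /* Sketch: the new single-point latency accounting. */
    static bool stop_ok_example(void)
    {
            s64 constraint_ns = 500000;     /* 500 us QoS constraint */
            s64 save_ns = 100000, stop_ns = 100000;
            s64 start_ns = 100000, restore_ns = 100000;

            constraint_ns -= save_ns + stop_ns + start_ns + restore_ns;
            return constraint_ns >= 0;      /* mirrors td->cached_stop_ok */
    }
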
 
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c..7ae7cd9 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@
 	u32 microvolt[3] = {0};
 	int count, ret;
 
-	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
-	if (!count)
+	/* Missing property isn't a problem, but an invalid entry is */
+	if (!of_find_property(opp->np, "opp-microvolt", NULL))
 		return 0;
 
+	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+	if (count < 0) {
+		dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+			__func__, count);
+		return count;
+	}
+
 	/* There can be one or three elements here */
 	if (count != 1 && count != 3) {
 		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@
  * share a common logic which is isolated here.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
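
The opp-microvolt fix matters because of_property_count_u32_elems()
returns a negative errno for a malformed property rather than 0, so the
old "if (!count)" test silently accepted broken entries. The corrected
pattern for any optional device-tree array looks roughly like this
sketch ("my,prop" is a placeholder property name):

    /* Sketch: distinguish "absent" from "malformed" for an optional
     * u32 array property.
     */
    static int parse_optional_u32_array(struct device *dev,
                                        struct device_node *np)
    {
            int count;

            if (!of_find_property(np, "my,prop", NULL))
                    return 0;               /* absent: fall back to defaults */

            count = of_property_count_u32_elems(np, "my,prop");
            if (count < 0) {                /* present but malformed */
                    dev_err(dev, "invalid my,prop (%d)\n", count);
                    return count;
            }

            /* ... validate count and read the values ... */
            return 0;
    }
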
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index cc55788..628ad7a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -98,6 +98,8 @@
 
 	int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
 	int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+	int (*reg_update_bits)(void *context, unsigned int reg,
+			       unsigned int mask, unsigned int val);
 
 	bool defer_caching;
 
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index f42f2ba..4c55cfb 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -32,8 +32,7 @@
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
-	snprintf(buf, buf_size, "%x", max_val);
-	return strlen(buf);
+	return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@
 		/* If we're in the region the user is trying to read */
 		if (p >= *ppos) {
 			/* ...but not beyond it */
-			if (buf_pos >= count - 1 - tot_len)
+			if (buf_pos + tot_len + 1 >= count)
 				break;
 
 			/* Format the register */
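
regmap_calc_reg_len() now leans on the C99 rule, which the kernel's
snprintf follows as well, that snprintf(NULL, 0, ...) writes nothing and
returns the length the output would have had. The idiom in isolation:

    /* Sketch: measure a formatted width without a scratch buffer. */
    static int hex_width(unsigned int max_val)
    {
            return snprintf(NULL, 0, "%x", max_val);
    }
    /* hex_width(0xff) == 2, hex_width(0x1000) == 4 */
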
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index afaf562..8cd155a 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -619,6 +619,7 @@
 		goto skip_format_initialization;
 	} else {
 		map->reg_read  = _regmap_bus_read;
+		map->reg_update_bits = bus->reg_update_bits;
 	}
 
 	reg_endian = regmap_get_reg_endian(bus, config);
@@ -2509,20 +2510,26 @@
 	int ret;
 	unsigned int tmp, orig;
 
-	ret = _regmap_read(map, reg, &orig);
-	if (ret != 0)
-		return ret;
+	if (change)
+		*change = false;
 
-	tmp = orig & ~mask;
-	tmp |= val & mask;
-
-	if (force_write || (tmp != orig)) {
-		ret = _regmap_write(map, reg, tmp);
-		if (change)
+	if (regmap_volatile(map, reg) && map->reg_update_bits) {
+		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+		if (ret == 0 && change)
 			*change = true;
 	} else {
-		if (change)
-			*change = false;
+		ret = _regmap_read(map, reg, &orig);
+		if (ret != 0)
+			return ret;
+
+		tmp = orig & ~mask;
+		tmp |= val & mask;
+
+		if (force_write || (tmp != orig)) {
+			ret = _regmap_write(map, reg, tmp);
+			if (ret == 0 && change)
+				*change = true;
+		}
 	}
 
 	return ret;
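
With the plumbing above, a bus that can do read-modify-write natively
exposes reg_update_bits, and regmap prefers it for volatile registers
over the usual read/mask/write sequence, which could be racy or simply
wrong for volatile contents. A minimal sketch of a bus advertising the
callback; the mybus_* helpers are hypothetical:

    /* Sketch: a regmap_bus supplying a native update-bits primitive. */
    static int mybus_reg_update_bits(void *context, unsigned int reg,
                                     unsigned int mask, unsigned int val)
    {
            /* one atomic update command on the wire (hypothetical) */
            return mybus_send_update(context, reg, mask, val);
    }

    static struct regmap_bus mybus_regmap = {
            .read            = mybus_raw_read,      /* hypothetical */
            .write           = mybus_raw_write,     /* hypothetical */
            .reg_update_bits = mybus_reg_update_bits,
    };
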
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 24882c1..59d8d0d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -436,13 +436,8 @@
 	}
 
 	dev = bcma_bus_get_host_dev(bus);
-	/* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when
-	 * of_default_bus_match_table is exported or in some other way
-	 * accessible. This is just a temporary workaround.
-	 */
-	if (IS_BUILTIN(CONFIG_BCMA) && dev) {
-		of_platform_populate(dev->of_node, of_default_bus_match_table,
-				     NULL, dev);
+	if (dev) {
+		of_platform_default_populate(dev->of_node, NULL, dev);
 	}
 
 	/* Cores providing flash access go before SPROM init */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6..674f800 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@
 {
 	const bool write = cmd->rq->cmd_flags & REQ_WRITE;
 	struct loop_device *lo = cmd->rq->q->queuedata;
-	int ret = -EIO;
+	int ret = 0;
 
-	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+		ret = -EIO;
 		goto failed;
+	}
 
 	ret = do_req_filebacked(lo, cmd->rq);
-
  failed:
-	if (ret)
-		cmd->rq->errors = -EIO;
-	blk_mq_complete_request(cmd->rq);
+	blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_write_work(struct work_struct *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a295b98..1c9e4fe 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@
 	case NULL_IRQ_SOFTIRQ:
 		switch (queue_mode)  {
 		case NULL_Q_MQ:
-			blk_mq_complete_request(cmd->rq);
+			blk_mq_complete_request(cmd->rq, cmd->rq->errors);
 			break;
 		case NULL_Q_RQ:
 			blk_complete_request(cmd->rq);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3f..6f04771 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@
 			spin_unlock_irqrestore(req->q->queue_lock, flags);
 			return;
 		}
+
 		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
 			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-				req->errors = -EINTR;
-			else
-				req->errors = status;
+				status = -EINTR;
 		} else {
-			req->errors = nvme_error_status(status);
+			status = nvme_error_status(status);
 		}
-	} else
-		req->errors = 0;
+	}
+
 	if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
 		u32 result = le32_to_cpup(&cqe->result);
 		req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@
 	}
 	nvme_free_iod(nvmeq->dev, iod);
 
-	blk_mq_complete_request(req);
+	blk_mq_complete_request(req, status);
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@
 	if (ns && ns->ms && !blk_integrity_rq(req)) {
 		if (!(ns->pi_type && ns->ms == 8) &&
 					req->cmd_type != REQ_TYPE_DRV_PRIV) {
-			req->errors = -EFAULT;
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, -EFAULT);
 			return BLK_MQ_RQ_QUEUE_OK;
 		}
 	}
@@ -2439,6 +2437,22 @@
 	list_sort(NULL, &dev->namespaces, ns_cmp);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+	struct nvme_queue *nvmeq;
+	int i;
+
+	for (i = 0; i < dev->online_queues; i++) {
+		nvmeq = dev->queues[i];
+
+		if (!nvmeq->tags || !(*nvmeq->tags))
+			continue;
+
+		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+					blk_mq_tags_cpumask(*nvmeq->tags));
+	}
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
 	struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@
 		return;
 	nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
 	kfree(ctrl);
+	nvme_set_irq_hints(dev);
 }
 
 /*
@@ -2953,22 +2968,6 @@
 	.compat_ioctl	= nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-	struct nvme_queue *nvmeq;
-	int i;
-
-	for (i = 0; i < dev->online_queues; i++) {
-		nvmeq = dev->queues[i];
-
-		if (!nvmeq->tags || !(*nvmeq->tags))
-			continue;
-
-		irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-					blk_mq_tags_cpumask(*nvmeq->tags));
-	}
-}
-
 static int nvme_dev_start(struct nvme_dev *dev)
 {
 	int result;
@@ -3010,8 +3009,6 @@
 	if (result)
 		goto free_tags;
 
-	nvme_set_irq_hints(dev);
-
 	dev->event_limit = 1;
 	return result;
 
@@ -3062,7 +3059,6 @@
 	} else {
 		nvme_unfreeze_queues(dev);
 		nvme_dev_add(dev);
-		nvme_set_irq_hints(dev);
 	}
 	return 0;
 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d93a037..f5e49b6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1863,9 +1863,11 @@
 		rbd_osd_read_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_SETALLOCHINT:
-		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE);
+		rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
+			   osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
 		/* fall through */
 	case CEPH_OSD_OP_WRITE:
+	case CEPH_OSD_OP_WRITEFULL:
 		rbd_osd_write_callback(obj_request);
 		break;
 	case CEPH_OSD_OP_STAT:
@@ -2401,7 +2403,10 @@
 				opcode = CEPH_OSD_OP_ZERO;
 		}
 	} else if (op_type == OBJ_OP_WRITE) {
-		opcode = CEPH_OSD_OP_WRITE;
+		if (!offset && length == object_size)
+			opcode = CEPH_OSD_OP_WRITEFULL;
+		else
+			opcode = CEPH_OSD_OP_WRITE;
 		osd_req_op_alloc_hint_init(osd_request, num_ops,
 					object_size, object_size);
 		num_ops++;
@@ -3760,6 +3765,7 @@
 	/* set io sizes to object size */
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
+	q->limits.max_sectors = queue_max_hw_sectors(q);
 	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
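
rbd now issues CEPH_OSD_OP_WRITEFULL whenever a write covers an entire
object, which lets the OSD replace the object outright instead of
merging a partial write. The opcode selection in isolation (the helper
name is illustrative):

    /* Sketch: a write starting at offset 0 and spanning the whole
     * object can use WRITEFULL.
     */
    static int rbd_pick_write_opcode(u64 offset, u64 length, u64 object_size)
    {
            if (!offset && length == object_size)
                    return CEPH_OSD_OP_WRITEFULL;
            return CEPH_OSD_OP_WRITE;
    }
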
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899c..6ca3549 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@
 	do {
 		virtqueue_disable_cb(vq);
 		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-			blk_mq_complete_request(vbr->req);
+			blk_mq_complete_request(vbr->req, vbr->req->errors);
 			req_done = true;
 		}
 		if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f00..7676575 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
+	struct pending_req *req, *n;
+	int i = 0, j;
+
 	if (blkif->xenblkd) {
 		kthread_stop(blkif->xenblkd);
 		wake_up(&blkif->shutdown_wq);
@@ -238,25 +241,6 @@
 	/* Remove all persistent grants and the cache of ballooned pages. */
 	xen_blkbk_free_caches(blkif);
 
-	return 0;
-}
-
-static void xen_blkif_free(struct xen_blkif *blkif)
-{
-	struct pending_req *req, *n;
-	int i = 0, j;
-
-	xen_blkif_disconnect(blkif);
-	xen_vbd_free(&blkif->vbd);
-
-	/* Make sure everything is drained before shutting down */
-	BUG_ON(blkif->persistent_gnt_c != 0);
-	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
-	BUG_ON(blkif->free_pages_num != 0);
-	BUG_ON(!list_empty(&blkif->persistent_purge_list));
-	BUG_ON(!list_empty(&blkif->free_pages));
-	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
-
 	/* Check that there is no request in use */
 	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
 		list_del(&req->free_list);
@@ -272,6 +256,24 @@
 	}
 
 	WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+	blkif->nr_ring_pages = 0;
+
+	return 0;
+}
+
+static void xen_blkif_free(struct xen_blkif *blkif)
+{
+	xen_blkif_disconnect(blkif);
+	xen_vbd_free(&blkif->vbd);
+
+	/* Make sure everything is drained before shutting down */
+	BUG_ON(blkif->persistent_gnt_c != 0);
+	BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+	BUG_ON(blkif->free_pages_num != 0);
+	BUG_ON(!list_empty(&blkif->persistent_purge_list));
+	BUG_ON(!list_empty(&blkif->free_pages));
+	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96..6111708 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@
 	RING_IDX i, rp;
 	unsigned long flags;
 	struct blkfront_info *info = (struct blkfront_info *)dev_id;
+	int error;
 
 	spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1182,37 +1183,37 @@
 			continue;
 		}
 
-		req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
 		switch (bret->operation) {
 		case BLKIF_OP_DISCARD:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 					   info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
 				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
 			}
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, error);
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 				     info->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
 				       info->gd->disk_name, op_name(bret->operation));
-				req->errors = -EOPNOTSUPP;
+				error = -EOPNOTSUPP;
 			}
-			if (unlikely(req->errors)) {
-				if (req->errors == -EOPNOTSUPP)
-					req->errors = 0;
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
@@ -1223,7 +1224,7 @@
 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
 					"request: %x\n", bret->status);
 
-			blk_mq_complete_request(req);
+			blk_mq_complete_request(req, error);
 			break;
 		default:
 			BUG();
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 0bd88c9..ec6af15 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -4,6 +4,7 @@
 
 config BT_INTEL
 	tristate
+	select REGMAP
 
 config BT_BCM
 	tristate
@@ -182,7 +183,8 @@
 
 config BT_HCIBPA10X
 	tristate "HCI BPA10x USB driver"
-	depends on USB
+	depends on USB && BT_HCIUART
+	select BT_HCIUART_H4
 	help
 	  Bluetooth HCI BPA10x USB driver.
 	  This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -275,7 +277,7 @@
 	  The core driver to support Marvell Bluetooth devices.
 
 	  This driver is required if you want to support
-	  Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897.
+	  Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997.
 
 	  Say Y here to compile Marvell Bluetooth driver
 	  into the kernel or say M to compile it as module.
@@ -289,7 +291,7 @@
 	  The driver for Marvell Bluetooth chipsets with SDIO interface.
 
 	  This driver is required if you want to use Marvell Bluetooth
-	  devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897
+	  devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997
 	  chipsets are supported.
 
 	  Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index e527a3e..fa893c3 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -93,6 +93,7 @@
 	{ USB_DEVICE(0x04CA, 0x300f) },
 	{ USB_DEVICE(0x04CA, 0x3010) },
 	{ USB_DEVICE(0x0930, 0x0219) },
+	{ USB_DEVICE(0x0930, 0x021c) },
 	{ USB_DEVICE(0x0930, 0x0220) },
 	{ USB_DEVICE(0x0930, 0x0227) },
 	{ USB_DEVICE(0x0b05, 0x17d0) },
@@ -104,6 +105,7 @@
 	{ USB_DEVICE(0x0CF3, 0x311F) },
 	{ USB_DEVICE(0x0cf3, 0x3121) },
 	{ USB_DEVICE(0x0CF3, 0x817a) },
+	{ USB_DEVICE(0x0CF3, 0x817b) },
 	{ USB_DEVICE(0x0cf3, 0xe003) },
 	{ USB_DEVICE(0x0CF3, 0xE004) },
 	{ USB_DEVICE(0x0CF3, 0xE005) },
@@ -153,6 +155,7 @@
 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -164,6 +167,7 @@
 	{ USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index a5c4d05..616ec2a 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -422,17 +422,12 @@
 
 	BT_DBG("hdev %p bfusb %p", hdev, data);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	write_lock_irqsave(&data->lock, flags);
 
 	err = bfusb_rx_submit(data, NULL);
 	if (!err) {
 		for (i = 1; i < BFUSB_MAX_BULK_RX; i++)
 			bfusb_rx_submit(data, NULL);
-	} else {
-		clear_bit(HCI_RUNNING, &hdev->flags);
 	}
 
 	write_unlock_irqrestore(&data->lock, flags);
@@ -458,9 +453,6 @@
 
 	BT_DBG("hdev %p bfusb %p", hdev, data);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	write_lock_irqsave(&data->lock, flags);
 	write_unlock_irqrestore(&data->lock, flags);
 
@@ -479,9 +471,6 @@
 
 	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
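
The bfusb hunks are one instance of a tree-wide cleanup repeated in the
drivers below: HCI_RUNNING is now maintained by the Bluetooth core
around the open/close/send callbacks, so drivers drop their own
test_bit/set_bit handling of that flag. What remains in a driver open
callback is just the hardware bring-up, roughly (driver name and
helpers hypothetical):

    /* Sketch: a post-cleanup ->open() does device setup only; the core
     * manages HCI_RUNNING itself.
     */
    static int mydrv_open(struct hci_dev *hdev)
    {
            struct mydrv_data *data = hci_get_drvdata(hdev);

            return mydrv_start_rx(data);    /* hypothetical rx start */
    }
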
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 35e63aa..36fa1c9 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -390,7 +390,7 @@
 	for (i = 0; i < len; i++) {
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -628,9 +628,6 @@
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
 		bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
 
-	if (test_and_set_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
 		unsigned int iobase = info->p_dev->resource[0]->start;
 
@@ -646,9 +643,6 @@
 {
 	struct bluecard_info *info = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	bluecard_hci_flush(hdev);
 
 	if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 8a31991..49c397e2 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -35,7 +35,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "0.10"
+#include "hci_uart.h"
+
+#define VERSION "0.11"
 
 static const struct usb_device_id bpa10x_table[] = {
 	/* Tektronix BPA 100/105 (Digianswer) */
@@ -56,112 +58,6 @@
 	struct sk_buff *rx_skb[2];
 };
 
-#define HCI_VENDOR_HDR_SIZE 5
-
-struct hci_vendor_hdr {
-	__u8    type;
-	__le16  snum;
-	__le16  dlen;
-} __packed;
-
-static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
-{
-	struct bpa10x_data *data = hci_get_drvdata(hdev);
-
-	BT_DBG("%s queue %d buffer %p count %d", hdev->name,
-							queue, buf, count);
-
-	if (queue < 0 || queue > 1)
-		return -EILSEQ;
-
-	hdev->stat.byte_rx += count;
-
-	while (count) {
-		struct sk_buff *skb = data->rx_skb[queue];
-		struct { __u8 type; int expect; } *scb;
-		int type, len = 0;
-
-		if (!skb) {
-			/* Start of the frame */
-
-			type = *((__u8 *) buf);
-			count--; buf++;
-
-			switch (type) {
-			case HCI_EVENT_PKT:
-				if (count >= HCI_EVENT_HDR_SIZE) {
-					struct hci_event_hdr *h = buf;
-					len = HCI_EVENT_HDR_SIZE + h->plen;
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_ACLDATA_PKT:
-				if (count >= HCI_ACL_HDR_SIZE) {
-					struct hci_acl_hdr *h = buf;
-					len = HCI_ACL_HDR_SIZE +
-							__le16_to_cpu(h->dlen);
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_SCODATA_PKT:
-				if (count >= HCI_SCO_HDR_SIZE) {
-					struct hci_sco_hdr *h = buf;
-					len = HCI_SCO_HDR_SIZE + h->dlen;
-				} else
-					return -EILSEQ;
-				break;
-
-			case HCI_VENDOR_PKT:
-				if (count >= HCI_VENDOR_HDR_SIZE) {
-					struct hci_vendor_hdr *h = buf;
-					len = HCI_VENDOR_HDR_SIZE +
-							__le16_to_cpu(h->dlen);
-				} else
-					return -EILSEQ;
-				break;
-			}
-
-			skb = bt_skb_alloc(len, GFP_ATOMIC);
-			if (!skb) {
-				BT_ERR("%s no memory for packet", hdev->name);
-				return -ENOMEM;
-			}
-
-			data->rx_skb[queue] = skb;
-
-			scb = (void *) skb->cb;
-			scb->type = type;
-			scb->expect = len;
-		} else {
-			/* Continuation */
-
-			scb = (void *) skb->cb;
-			len = scb->expect;
-		}
-
-		len = min(len, count);
-
-		memcpy(skb_put(skb, len), buf, len);
-
-		scb->expect -= len;
-
-		if (scb->expect == 0) {
-			/* Complete frame */
-
-			data->rx_skb[queue] = NULL;
-
-			bt_cb(skb)->pkt_type = scb->type;
-			hci_recv_frame(hdev, skb);
-		}
-
-		count -= len; buf += len;
-	}
-
-	return 0;
-}
-
 static void bpa10x_tx_complete(struct urb *urb)
 {
 	struct sk_buff *skb = urb->context;
@@ -184,6 +80,22 @@
 	kfree_skb(skb);
 }
 
+#define HCI_VENDOR_HDR_SIZE 5
+
+#define HCI_RECV_VENDOR \
+	.type = HCI_VENDOR_PKT, \
+	.hlen = HCI_VENDOR_HDR_SIZE, \
+	.loff = 3, \
+	.lsize = 2, \
+	.maxlen = HCI_MAX_FRAME_SIZE
+
+static const struct h4_recv_pkt bpa10x_recv_pkts[] = {
+	{ H4_RECV_ACL,     .recv = hci_recv_frame },
+	{ H4_RECV_SCO,     .recv = hci_recv_frame },
+	{ H4_RECV_EVENT,   .recv = hci_recv_frame },
+	{ HCI_RECV_VENDOR, .recv = hci_recv_diag  },
+};
+
 static void bpa10x_rx_complete(struct urb *urb)
 {
 	struct hci_dev *hdev = urb->context;
@@ -197,11 +109,17 @@
 		return;
 
 	if (urb->status == 0) {
-		if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe),
+		bool idx = usb_pipebulk(urb->pipe);
+
+		data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
 						urb->transfer_buffer,
-						urb->actual_length) < 0) {
+						urb->actual_length,
+						bpa10x_recv_pkts,
+						ARRAY_SIZE(bpa10x_recv_pkts));
+		if (IS_ERR(data->rx_skb[idx])) {
 			BT_ERR("%s corrupted event packet", hdev->name);
 			hdev->stat.err_rx++;
+			data->rx_skb[idx] = NULL;
 		}
 	}
 
@@ -304,9 +222,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	err = bpa10x_submit_intr_urb(hdev);
 	if (err < 0)
 		goto error;
@@ -320,8 +235,6 @@
 error:
 	usb_kill_anchored_urbs(&data->rx_anchor);
 
-	clear_bit(HCI_RUNNING, &hdev->flags);
-
 	return err;
 }
 
@@ -331,9 +244,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	usb_kill_anchored_urbs(&data->rx_anchor);
 
 	return 0;
@@ -350,6 +260,24 @@
 	return 0;
 }
 
+static int bpa10x_setup(struct hci_dev *hdev)
+{
+	const u8 req[] = { 0x07 };
+	struct sk_buff *skb;
+
+	BT_DBG("%s", hdev->name);
+
+	/* Read revision string */
+	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+
+	kfree_skb(skb);
+	return 0;
+}
+
 static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct bpa10x_data *data = hci_get_drvdata(hdev);
@@ -360,9 +288,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	skb->dev = (void *) hdev;
 
 	urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -431,6 +356,25 @@
 	return 0;
 }
 
+static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
+{
+	const u8 req[] = { 0x00, enable };
+	struct sk_buff *skb;
+
+	BT_DBG("%s", hdev->name);
+
+	if (!test_bit(HCI_RUNNING, &hdev->flags))
+		return -ENETDOWN;
+
+	/* Enable sniffer operation */
+	skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	kfree_skb(skb);
+	return 0;
+}
+
 static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
 	struct bpa10x_data *data;
@@ -465,7 +409,9 @@
 	hdev->open     = bpa10x_open;
 	hdev->close    = bpa10x_close;
 	hdev->flush    = bpa10x_flush;
+	hdev->setup    = bpa10x_setup;
 	hdev->send     = bpa10x_send_frame;
+	hdev->set_diag = bpa10x_set_diag;
 
 	set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
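
bpa10x's hand-rolled reassembly is replaced by the shared h4_recv_buf()
helper, which walks a table of packet descriptors: each entry names the
packet type byte, the header length, and where in the header the
payload-length field sits. The HCI_RECV_VENDOR entry above reads as "a
5-byte header whose 16-bit length field starts at offset 3". The same
descriptor style for a made-up vendor frame, as a sketch:

    /* Sketch: describing a hypothetical vendor packet (1 type byte,
     * 2-byte sequence number, 2-byte little-endian length) for
     * h4_recv_buf().
     */
    #define MYVND_RECV_FRAME \
            .type = 0xff,                   /* vendor indicator byte */ \
            .hlen = 5,                      /* full header length */ \
            .loff = 3,                      /* offset of length field */ \
            .lsize = 2,                     /* length field is __le16 */ \
            .maxlen = HCI_MAX_FRAME_SIZE

    static const struct h4_recv_pkt mydrv_recv_pkts[] = {
            { H4_RECV_EVENT,    .recv = hci_recv_frame },
            { MYVND_RECV_FRAME, .recv = hci_recv_diag  },
    };
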
 
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 772a277..5803aae 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -233,7 +233,7 @@
 		info->hdev->stat.byte_rx++;
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -270,7 +270,6 @@
 				/* Unknown packet */
 				BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 				info->hdev->stat.err_rx++;
-				clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
 				kfree_skb(info->rx_skb);
 				info->rx_skb = NULL;
@@ -395,17 +394,12 @@
 
 static int bt3c_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
 
 static int bt3c_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	bt3c_hci_flush(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 02ed816..0b69794 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -181,6 +181,27 @@
 	return 0;
 }
 
+static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: BCM: Reading local name failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return skb;
+	}
+
+	if (skb->len != sizeof(struct hci_rp_read_local_name)) {
+		BT_ERR("%s: BCM: Local name length mismatch", hdev->name);
+		kfree_skb(skb);
+		return ERR_PTR(-EIO);
+	}
+
+	return skb;
+}
+
 static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -302,7 +323,7 @@
 	}
 
 	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-		hw_name ? : "BCM", (subver & 0x7000) >> 13,
+		hw_name ? : "BCM", (subver & 0xe000) >> 13,
 		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
 	return 0;
@@ -332,7 +353,7 @@
 	kfree_skb(skb);
 
 	BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-		(subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
+		(subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
 		(subver & 0x00ff), rev & 0x0fff);
 
 	btbcm_check_bdaddr(hdev);
@@ -393,6 +414,14 @@
 	BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
 	kfree_skb(skb);
 
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+	kfree_skb(skb);
+
 	switch ((rev & 0xf000) >> 12) {
 	case 0:
 	case 3:
@@ -432,7 +461,7 @@
 	}
 
 	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-		hw_name ? : "BCM", (subver & 0x7000) >> 13,
+		hw_name ? : "BCM", (subver & 0xe000) >> 13,
 		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
 	err = request_firmware(&fw, fw_name, &hdev->dev);
@@ -461,9 +490,17 @@
 	kfree_skb(skb);
 
 	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-		hw_name ? : "BCM", (subver & 0x7000) >> 13,
+		hw_name ? : "BCM", (subver & 0xe000) >> 13,
 		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+	kfree_skb(skb);
+
 	btbcm_check_bdaddr(hdev);
 
 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
@@ -475,12 +512,34 @@
 int btbcm_setup_apple(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
+	int err;
+
+	/* Reset */
+	err = btbcm_reset(hdev);
+	if (err)
+		return err;
 
 	/* Read Verbose Config Version Info */
 	skb = btbcm_read_verbose_config(hdev);
 	if (!IS_ERR(skb)) {
-		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-			get_unaligned_le16(skb->data + 5));
+		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name,
+			skb->data[1], get_unaligned_le16(skb->data + 5));
+		kfree_skb(skb);
+	}
+
+	/* Read USB Product Info */
+	skb = btbcm_read_usb_product(hdev);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name,
+			get_unaligned_le16(skb->data + 1),
+			get_unaligned_le16(skb->data + 3));
+		kfree_skb(skb);
+	}
+
+	/* Read Local Name */
+	skb = btbcm_read_local_name(hdev);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
 		kfree_skb(skb);
 	}
 
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 9e18988..1f13e61 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -23,6 +23,7 @@
 
 #include <linux/module.h>
 #include <linux/firmware.h>
+#include <linux/regmap.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -90,6 +91,75 @@
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+int btintel_set_diag(struct hci_dev *hdev, bool enable)
+{
+	struct sk_buff *skb;
+	u8 param[3];
+	int err;
+
+	if (enable) {
+		param[0] = 0x03;
+		param[1] = 0x03;
+		param[2] = 0x03;
+	} else {
+		param[0] = 0x00;
+		param[1] = 0x00;
+		param[2] = 0x00;
+	}
+
+	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		if (err == -ENODATA)
+			goto done;
+		BT_ERR("%s: Changing Intel diagnostic mode failed (%d)",
+		       hdev->name, err);
+		return err;
+	}
+	kfree_skb(skb);
+
+done:
+	btintel_set_event_mask(hdev, enable);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_diag);
+
+int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
+{
+	struct sk_buff *skb;
+	u8 param[2];
+	int err;
+
+	param[0] = 0x01;
+	param[1] = 0x00;
+
+	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
+		       hdev->name, err);
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	err = btintel_set_diag(hdev, enable);
+
+	param[0] = 0x00;
+	param[1] = 0x00;
+
+	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
+		       hdev->name, err);
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
+
 void btintel_hw_error(struct hci_dev *hdev, u8 code)
 {
 	struct sk_buff *skb;
@@ -215,6 +285,259 @@
 }
 EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
 
+int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
+{
+	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	struct sk_buff *skb;
+	int err;
+
+	if (debug)
+		mask[1] |= 0x62;
+
+	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Setting Intel event mask failed (%d)",
+		       hdev->name, err);
+		return err;
+	}
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_set_event_mask);
+
+int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
+{
+	struct sk_buff *skb;
+	u8 param[2];
+	int err;
+
+	param[0] = 0x01;
+	param[1] = 0x00;
+
+	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
+		       hdev->name, err);
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	err = btintel_set_event_mask(hdev, debug);
+
+	param[0] = 0x00;
+	param[1] = 0x00;
+
+	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
+		       hdev->name, err);
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
+
+/* ------- REGMAP IBT SUPPORT ------- */
+
+#define IBT_REG_MODE_8BIT  0x00
+#define IBT_REG_MODE_16BIT 0x01
+#define IBT_REG_MODE_32BIT 0x02
+
+struct regmap_ibt_context {
+	struct hci_dev *hdev;
+	__u16 op_write;
+	__u16 op_read;
+};
+
+struct ibt_cp_reg_access {
+	__le32  addr;
+	__u8    mode;
+	__u8    len;
+	__u8    data[0];
+} __packed;
+
+struct ibt_rp_reg_access {
+	__u8    status;
+	__le32  addr;
+	__u8    data[0];
+} __packed;
+
+static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct regmap_ibt_context *ctx = context;
+	struct ibt_cp_reg_access cp;
+	struct ibt_rp_reg_access *rp;
+	struct sk_buff *skb;
+	int err = 0;
+
+	if (reg_size != sizeof(__le32))
+		return -EINVAL;
+
+	switch (val_size) {
+	case 1:
+		cp.mode = IBT_REG_MODE_8BIT;
+		break;
+	case 2:
+		cp.mode = IBT_REG_MODE_16BIT;
+		break;
+	case 4:
+		cp.mode = IBT_REG_MODE_32BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* regmap provides a little-endian formatted addr */
+	cp.addr = *(__le32 *)addr;
+	cp.len = val_size;
+
+	bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr));
+
+	skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp,
+			   HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)",
+			   le32_to_cpu(cp.addr), err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*rp) + val_size) {
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len",
+			   le32_to_cpu(cp.addr));
+		err = -EINVAL;
+		goto done;
+	}
+
+	rp = (struct ibt_rp_reg_access *)skb->data;
+
+	if (rp->addr != cp.addr) {
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr",
+			   le32_to_cpu(rp->addr));
+		err = -EINVAL;
+		goto done;
+	}
+
+	memcpy(val, rp->data, val_size);
+
+done:
+	kfree_skb(skb);
+	return err;
+}
+
+static int regmap_ibt_gather_write(void *context,
+				   const void *addr, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct regmap_ibt_context *ctx = context;
+	struct ibt_cp_reg_access *cp;
+	struct sk_buff *skb;
+	int plen = sizeof(*cp) + val_size;
+	u8 mode;
+	int err = 0;
+
+	if (reg_size != sizeof(__le32))
+		return -EINVAL;
+
+	switch (val_size) {
+	case 1:
+		mode = IBT_REG_MODE_8BIT;
+		break;
+	case 2:
+		mode = IBT_REG_MODE_16BIT;
+		break;
+	case 4:
+		mode = IBT_REG_MODE_32BIT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cp = kmalloc(plen, GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	/* regmap provides a little-endian formatted addr/value */
+	cp->addr = *(__le32 *)addr;
+	cp->mode = mode;
+	cp->len = val_size;
+	memcpy(&cp->data, val, val_size);
+
+	bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr));
+
+	skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)",
+			   le32_to_cpu(cp->addr), err);
+		goto done;
+	}
+	kfree_skb(skb);
+
+done:
+	kfree(cp);
+	return err;
+}
+
+static int regmap_ibt_write(void *context, const void *data, size_t count)
+{
+	/* data contains the register address followed by the value; since
+	 * only 32-bit addresses are supported, the minimum data size is
+	 * 4 bytes.
+	 */
+	if (WARN_ONCE(count < 4, "Invalid register access"))
+		return -EINVAL;
+
+	return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4);
+}
+
+static void regmap_ibt_free_context(void *context)
+{
+	kfree(context);
+}
+
+static struct regmap_bus regmap_ibt = {
+	.read = regmap_ibt_read,
+	.write = regmap_ibt_write,
+	.gather_write = regmap_ibt_gather_write,
+	.free_context = regmap_ibt_free_context,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+/* Config is the same for all register regions */
+static const struct regmap_config regmap_ibt_cfg = {
+	.name      = "btintel_regmap",
+	.reg_bits  = 32,
+	.val_bits  = 32,
+};
+
+struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
+				   u16 opcode_write)
+{
+	struct regmap_ibt_context *ctx;
+
+	bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read,
+		    opcode_write);
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return ERR_PTR(-ENOMEM);
+
+	ctx->op_read = opcode_read;
+	ctx->op_write = opcode_write;
+	ctx->hdev = hdev;
+
+	return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg);
+}
+EXPORT_SYMBOL_GPL(btintel_regmap_init);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
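
btintel_regmap_init() layers regmap on top of vendor HCI commands:
register reads and writes travel as 0xfcXX command/response exchanges
carrying a little-endian 32-bit address. Once the region is set up,
callers just use the normal regmap API; in this sketch the opcode pair
0xfc8b/0xfc8e is a placeholder, not the real Intel command set:

    /* Sketch: one register read through the HCI-backed regmap. */
    static int mydrv_peek(struct hci_dev *hdev, u32 addr, unsigned int *val)
    {
            struct regmap *map;
            int err;

            map = btintel_regmap_init(hdev, 0xfc8b, 0xfc8e);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            err = regmap_read(map, addr, val);  /* goes out as an HCI command */
            regmap_exit(map);
            return err;
    }
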
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 52deaf2..07e58e0 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,12 +73,19 @@
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int btintel_set_diag(struct hci_dev *hdev, bool enable);
+int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
 void btintel_hw_error(struct hci_dev *hdev, u8 code);
 
 void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
 int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
 			const void *param);
 int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
+int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
+int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);
+
+struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
+				   u16 opcode_write);
 
 #else
 
@@ -92,6 +99,16 @@
 	return -EOPNOTSUPP;
 }
 
+static inline int btintel_set_diag(struct hci_dev *hdev, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
 static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
 {
 }
@@ -113,4 +130,20 @@
 	return -EOPNOTSUPP;
 }
 
+static inline int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
+						 u16 opcode_read,
+						 u16 opcode_write)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index bc110f6..6af9173 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -184,7 +184,7 @@
 	}
 
 	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
-	if (skb == NULL) {
+	if (!skb) {
 		BT_ERR("No free skb");
 		return -ENOMEM;
 	}
@@ -436,13 +436,6 @@
 
 	BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
-		BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
-		print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
-							skb->data, skb->len);
-		return -EBUSY;
-	}
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
@@ -477,9 +470,6 @@
 {
 	struct btmrvl_private *priv = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	skb_queue_purge(&priv->adapter->tx_queue);
 
 	return 0;
@@ -487,8 +477,6 @@
 
 static int btmrvl_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
@@ -528,14 +516,17 @@
 		ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data",
 						cal_data + BT_CAL_HDR_LEN,
 						BT_CAL_DATA_SIZE);
-		if (ret)
+		if (ret) {
+			of_node_put(dt_node);
 			return ret;
+		}
 
 		BT_DBG("Use cal data from device tree");
 		ret = btmrvl_download_cal_data(priv, cal_data,
 					       BT_CAL_DATA_SIZE);
 		if (ret) {
 			BT_ERR("Fail to download calibrate data");
+			of_node_put(dt_node);
 			return ret;
 		}
 	}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b9978a7..71ea2a3 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -146,6 +146,29 @@
 	.fw_dump_end = 0xea,
 };
 
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
+	.cfg = 0x00,
+	.host_int_mask = 0x08,
+	.host_intstatus = 0x0c,
+	.card_status = 0x5c,
+	.sq_read_base_addr_a0 = 0xf8,
+	.sq_read_base_addr_a1 = 0xf9,
+	.card_revision = 0xc8,
+	.card_fw_status0 = 0xe8,
+	.card_fw_status1 = 0xe9,
+	.card_rx_len = 0xea,
+	.card_rx_unit = 0xeb,
+	.io_port_0 = 0xe4,
+	.io_port_1 = 0xe5,
+	.io_port_2 = 0xe6,
+	.int_read_to_clear = true,
+	.host_int_rsr = 0x04,
+	.card_misc_cfg = 0xD8,
+	.fw_dump_ctrl = 0xf0,
+	.fw_dump_start = 0xf1,
+	.fw_dump_end = 0xf8,
+};
+
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 	.helper		= "mrvl/sd8688_helper.bin",
 	.firmware	= "mrvl/sd8688.bin",
@@ -191,25 +214,37 @@
 	.supports_fw_dump = true,
 };
 
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
+	.helper         = NULL,
+	.firmware       = "mrvl/sd8997_uapsta.bin",
+	.reg            = &btmrvl_reg_8997,
+	.support_pscan_win_report = true,
+	.sd_blksz_fw_dl = 256,
+	.supports_fw_dump = true,
+};
+
 static const struct sdio_device_id btmrvl_sdio_ids[] = {
 	/* Marvell SD8688 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8688 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8688 },
 	/* Marvell SD8787 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
 	/* Marvell SD8787 Bluetooth AMP device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8787 },
 	/* Marvell SD8797 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8797 },
 	/* Marvell SD8887 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136),
 			.driver_data = (unsigned long)&btmrvl_sdio_sd8887 },
 	/* Marvell SD8897 Bluetooth device */
 	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
-			.driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8897 },
+	/* Marvell SD8997 Bluetooth device */
+	{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
+			.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
 
 	{ }	/* Terminating entry */
 };
@@ -619,7 +654,7 @@
 
 	/* Allocate buffer */
 	skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
-	if (skb == NULL) {
+	if (!skb) {
 		BT_ERR("No free skb");
 		ret = -ENOMEM;
 		goto exit;
@@ -1278,6 +1313,12 @@
 
 		if (memory_size == 0) {
 			BT_INFO("Firmware dump finished!");
+			sdio_writeb(card->func, FW_DUMP_READ_DONE,
+				    card->reg->fw_dump_ctrl, &ret);
+			if (ret) {
+				BT_ERR("SDIO Write MEMDUMP_FINISH ERR");
+				goto done;
+			}
 			break;
 		}
 
@@ -1616,3 +1657,4 @@
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 83f6437..7b62442 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -194,21 +194,15 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	sdio_claim_host(data->func);
 
 	err = sdio_enable_func(data->func);
-	if (err < 0) {
-		clear_bit(HCI_RUNNING, &hdev->flags);
+	if (err < 0)
 		goto release;
-	}
 
 	err = sdio_claim_irq(data->func, btsdio_interrupt);
 	if (err < 0) {
 		sdio_disable_func(data->func);
-		clear_bit(HCI_RUNNING, &hdev->flags);
 		goto release;
 	}
 
@@ -229,9 +223,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	sdio_claim_host(data->func);
 
 	sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);
@@ -261,9 +252,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		hdev->stat.cmd_tx++;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index abb4d21..bb8e402 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -38,7 +38,7 @@
 #include <linux/serial.h>
 #include <linux/serial_reg.h>
 #include <linux/bitops.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ciscode.h>
@@ -188,7 +188,7 @@
 		info->hdev->stat.byte_rx++;
 
 		/* Allocate packet */
-		if (info->rx_skb == NULL) {
+		if (!info->rx_skb) {
 			info->rx_state = RECV_WAIT_PACKET_TYPE;
 			info->rx_count = 0;
 			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
@@ -223,7 +223,6 @@
 				/* Unknown packet */
 				BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
 				info->hdev->stat.err_rx++;
-				clear_bit(HCI_RUNNING, &(info->hdev->flags));
 
 				kfree_skb(info->rx_skb);
 				info->rx_skb = NULL;
@@ -409,17 +408,12 @@
 
 static int btuart_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
 
 static int btuart_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	btuart_hci_flush(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index dfaaea2..e33dacf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -60,6 +60,8 @@
 #define BTUSB_QCA_ROME		0x8000
 #define BTUSB_BCM_APPLE		0x10000
 #define BTUSB_REALTEK		0x20000
+#define BTUSB_BCM2045		0x40000
+#define BTUSB_IFNUM_2		0x80000
 
 static const struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
@@ -73,7 +75,7 @@
 
 	/* Apple-specific (Broadcom) devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
-	  .driver_info = BTUSB_BCM_APPLE },
+	  .driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 },
 
 	/* MediaTek MT76x0E */
 	{ USB_DEVICE(0x0e8d, 0x763f) },
@@ -124,6 +126,9 @@
 	/* Broadcom BCM20702B0 (Dynex/Insignia) */
 	{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
 
+	/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
+	{ USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+
 	/* Foxconn - Hon Hai */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_PATCHRAM },
@@ -164,6 +169,9 @@
 	/* Broadcom BCM2033 without firmware */
 	{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
 
+	/* Broadcom BCM2045 devices */
+	{ USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 },
+
 	/* Atheros 3011 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
 	{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -195,6 +203,7 @@
 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -206,6 +215,7 @@
 	{ USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
@@ -341,12 +351,14 @@
 #define BTUSB_FIRMWARE_FAILED	8
 #define BTUSB_BOOTING		9
 #define BTUSB_RESET_RESUME	10
+#define BTUSB_DIAG_RUNNING	11
 
 struct btusb_data {
 	struct hci_dev       *hdev;
 	struct usb_device    *udev;
 	struct usb_interface *intf;
 	struct usb_interface *isoc;
+	struct usb_interface *diag;
 
 	unsigned long flags;
 
@@ -361,6 +373,7 @@
 	struct usb_anchor intr_anchor;
 	struct usb_anchor bulk_anchor;
 	struct usb_anchor isoc_anchor;
+	struct usb_anchor diag_anchor;
 	spinlock_t rxlock;
 
 	struct sk_buff *evt_skb;
@@ -372,6 +385,8 @@
 	struct usb_endpoint_descriptor *bulk_rx_ep;
 	struct usb_endpoint_descriptor *isoc_tx_ep;
 	struct usb_endpoint_descriptor *isoc_rx_ep;
+	struct usb_endpoint_descriptor *diag_tx_ep;
+	struct usb_endpoint_descriptor *diag_rx_ep;
 
 	__u8 cmdreq_type;
 	__u8 cmdreq;
@@ -869,6 +884,92 @@
 	return err;
 }
 
+static void btusb_diag_complete(struct urb *urb)
+{
+	struct hci_dev *hdev = urb->context;
+	struct btusb_data *data = hci_get_drvdata(hdev);
+	int err;
+
+	BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
+	       urb->actual_length);
+
+	if (urb->status == 0) {
+		struct sk_buff *skb;
+
+		skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
+		if (skb) {
+			memcpy(skb_put(skb, urb->actual_length),
+			       urb->transfer_buffer, urb->actual_length);
+			hci_recv_diag(hdev, skb);
+		}
+	} else if (urb->status == -ENOENT) {
+		/* URB killed by usb_kill_urb (e.g. on suspend); do not resubmit */
+		return;
+	}
+
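+	/* Stop resubmitting once the diagnostic channel is no longer running */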
+	if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags))
+		return;
+
+	usb_anchor_urb(urb, &data->diag_anchor);
+	usb_mark_last_busy(data->udev);
+
+	err = usb_submit_urb(urb, GFP_ATOMIC);
+	if (err < 0) {
+		/* -EPERM: urb is being killed;
+		 * -ENODEV: device got disconnected */
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p failed to resubmit (%d)",
+			       hdev->name, urb, -err);
+		usb_unanchor_urb(urb);
+	}
+}
+
+static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags)
+{
+	struct btusb_data *data = hci_get_drvdata(hdev);
+	struct urb *urb;
+	unsigned char *buf;
+	unsigned int pipe;
+	int err, size = HCI_MAX_FRAME_SIZE;
+
+	BT_DBG("%s", hdev->name);
+
+	if (!data->diag_rx_ep)
+		return -ENODEV;
+
+	urb = usb_alloc_urb(0, mem_flags);
+	if (!urb)
+		return -ENOMEM;
+
+	buf = kmalloc(size, mem_flags);
+	if (!buf) {
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+
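+	/* Diagnostic data is received on a dedicated bulk IN endpoint */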
+	pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress);
+
+	usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
+			  btusb_diag_complete, hdev);
+
+	urb->transfer_flags |= URB_FREE_BUFFER;
+
+	usb_mark_last_busy(data->udev);
+	usb_anchor_urb(urb, &data->diag_anchor);
+
+	err = usb_submit_urb(urb, mem_flags);
+	if (err < 0) {
+		if (err != -EPERM && err != -ENODEV)
+			BT_ERR("%s urb %p submission failed (%d)",
+			       hdev->name, urb, -err);
+		usb_unanchor_urb(urb);
+	}
+
+	usb_free_urb(urb);
+
+	return err;
+}
+
 static void btusb_tx_complete(struct urb *urb)
 {
 	struct sk_buff *skb = urb->context;
@@ -940,9 +1041,6 @@
 
 	data->intf->needs_remote_wakeup = 1;
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		goto done;
-
 	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
 		goto done;
 
@@ -959,13 +1057,17 @@
 	set_bit(BTUSB_BULK_RUNNING, &data->flags);
 	btusb_submit_bulk_urb(hdev, GFP_KERNEL);
 
+	if (data->diag) {
+		if (!btusb_submit_diag_urb(hdev, GFP_KERNEL))
+			set_bit(BTUSB_DIAG_RUNNING, &data->flags);
+	}
+
 done:
 	usb_autopm_put_interface(data->intf);
 	return 0;
 
 failed:
 	clear_bit(BTUSB_INTR_RUNNING, &data->flags);
-	clear_bit(HCI_RUNNING, &hdev->flags);
 	usb_autopm_put_interface(data->intf);
 	return err;
 }
@@ -975,6 +1077,7 @@
 	usb_kill_anchored_urbs(&data->intr_anchor);
 	usb_kill_anchored_urbs(&data->bulk_anchor);
 	usb_kill_anchored_urbs(&data->isoc_anchor);
+	usb_kill_anchored_urbs(&data->diag_anchor);
 }
 
 static int btusb_close(struct hci_dev *hdev)
@@ -984,15 +1087,13 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	cancel_work_sync(&data->work);
 	cancel_work_sync(&data->waker);
 
 	clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 	clear_bit(BTUSB_BULK_RUNNING, &data->flags);
 	clear_bit(BTUSB_INTR_RUNNING, &data->flags);
+	clear_bit(BTUSB_DIAG_RUNNING, &data->flags);
 
 	btusb_stop_traffic(data);
 	btusb_free_frags(data);
@@ -1156,9 +1257,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		urb = alloc_ctrl_urb(hdev, skb);
@@ -1603,8 +1701,7 @@
 		BT_INFO("%s: Intel device is already patched. patch num: %02x",
 			hdev->name, ver->fw_patch_num);
 		kfree_skb(skb);
-		btintel_check_bdaddr(hdev);
-		return 0;
+		goto complete;
 	}
 
 	/* Opens the firmware patch file based on the firmware version read
@@ -1616,8 +1713,7 @@
 	fw = btusb_setup_intel_get_fw(hdev, ver);
 	if (!fw) {
 		kfree_skb(skb);
-		btintel_check_bdaddr(hdev);
-		return 0;
+		goto complete;
 	}
 	fw_ptr = fw->data;
 
@@ -1690,8 +1786,7 @@
 	BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
 		hdev->name);
 
-	btintel_check_bdaddr(hdev);
-	return 0;
+	goto complete;
 
 exit_mfg_disable:
 	/* Disable the manufacturer mode without reset */
@@ -1706,8 +1801,7 @@
 
 	BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
 
-	btintel_check_bdaddr(hdev);
-	return 0;
+	goto complete;
 
 exit_mfg_deactivate:
 	release_firmware(fw);
@@ -1727,6 +1821,12 @@
 	BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
 		hdev->name);
 
+complete:
+	/* Set the event mask for Intel specific vendor events. This enables
+	 * a few extra events that are useful during general operation.
+	 */
+	btintel_set_event_mask_mfg(hdev, false);
+
 	btintel_check_bdaddr(hdev);
 	return 0;
 }
@@ -1843,9 +1943,6 @@
 
 	BT_DBG("%s", hdev->name);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	switch (bt_cb(skb)->pkt_type) {
 	case HCI_COMMAND_PKT:
 		if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
@@ -2019,6 +2116,15 @@
 	BT_INFO("%s: Secure boot is %s", hdev->name,
 		params->secure_boot ? "enabled" : "disabled");
 
+	BT_INFO("%s: OTP lock is %s", hdev->name,
+		params->otp_lock ? "enabled" : "disabled");
+
+	BT_INFO("%s: API lock is %s", hdev->name,
+		params->api_lock ? "enabled" : "disabled");
+
+	BT_INFO("%s: Debug lock is %s", hdev->name,
+		params->debug_lock ? "enabled" : "disabled");
+
 	BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
 		params->min_fw_build_nn, params->min_fw_build_cw,
 		2000 + params->min_fw_build_yy);
@@ -2235,6 +2341,15 @@
 	 */
 	btintel_load_ddc_config(hdev, fwname);
 
+	/* Set the event mask for Intel specific vendor events. This enables
+	 * a few extra events that are useful during general operation. It
+	 * does not enable any debugging related events.
+	 *
+	 * The device will function correctly without these events enabled
+	 * and thus there is no need to fail the setup.
+	 */
+	btintel_set_event_mask(hdev, false);
+
 	return 0;
 }
 
@@ -2560,19 +2675,115 @@
 	return 0;
 }
 
+#ifdef CONFIG_BT_HCIBTUSB_BCM
+static inline int __set_diag_interface(struct hci_dev *hdev)
+{
+	struct btusb_data *data = hci_get_drvdata(hdev);
+	struct usb_interface *intf = data->diag;
+	int i;
+
+	if (!data->diag)
+		return -ENODEV;
+
+	data->diag_tx_ep = NULL;
+	data->diag_rx_ep = NULL;
+
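+	/* Pick the first bulk IN and bulk OUT endpoints for diagnostics */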
+	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+		struct usb_endpoint_descriptor *ep_desc;
+
+		ep_desc = &intf->cur_altsetting->endpoint[i].desc;
+
+		if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
+			data->diag_tx_ep = ep_desc;
+			continue;
+		}
+
+		if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
+			data->diag_rx_ep = ep_desc;
+			continue;
+		}
+	}
+
+	if (!data->diag_tx_ep || !data->diag_rx_ep) {
+		BT_ERR("%s invalid diagnostic descriptors", hdev->name);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable)
+{
+	struct btusb_data *data = hci_get_drvdata(hdev);
+	struct sk_buff *skb;
+	struct urb *urb;
+	unsigned int pipe;
+
+	if (!data->diag_tx_ep)
+		return ERR_PTR(-ENODEV);
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return ERR_PTR(-ENOMEM);
+
+	skb = bt_skb_alloc(2, GFP_KERNEL);
+	if (!skb) {
+		usb_free_urb(urb);
+		return ERR_PTR(-ENOMEM);
+	}
+
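+	/* Broadcom diagnostic toggle: 0xf0 followed by the enable flag */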
+	*skb_put(skb, 1) = 0xf0;
+	*skb_put(skb, 1) = enable;
+
+	pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress);
+
+	usb_fill_bulk_urb(urb, data->udev, pipe,
+			  skb->data, skb->len, btusb_tx_complete, skb);
+
+	skb->dev = (void *)hdev;
+
+	return urb;
+}
+
+static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
+{
+	struct btusb_data *data = hci_get_drvdata(hdev);
+	struct urb *urb;
+
+	if (!data->diag)
+		return -ENODEV;
+
+	if (!test_bit(HCI_RUNNING, &hdev->flags))
+		return -ENETDOWN;
+
+	urb = alloc_diag_urb(hdev, enable);
+	if (IS_ERR(urb))
+		return PTR_ERR(urb);
+
+	return submit_or_queue_tx_urb(hdev, urb);
+}
+#endif
+
 static int btusb_probe(struct usb_interface *intf,
 		       const struct usb_device_id *id)
 {
 	struct usb_endpoint_descriptor *ep_desc;
 	struct btusb_data *data;
 	struct hci_dev *hdev;
+	unsigned ifnum_base;
 	int i, err;
 
 	BT_DBG("intf %p id %p", intf, id);
 
 	/* interface numbers are hardcoded in the spec */
-	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
-		return -ENODEV;
+	if (intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+		if (!(id->driver_info & BTUSB_IFNUM_2))
+			return -ENODEV;
+		if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
+			return -ENODEV;
+	}
+
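+	/* The isoc and diag interfaces sit at fixed offsets from this base */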
+	ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber;
 
 	if (!id->driver_info) {
 		const struct usb_device_id *match;
@@ -2640,6 +2851,7 @@
 	init_usb_anchor(&data->intr_anchor);
 	init_usb_anchor(&data->bulk_anchor);
 	init_usb_anchor(&data->isoc_anchor);
+	init_usb_anchor(&data->diag_anchor);
 	spin_lock_init(&data->rxlock);
 
 	if (id->driver_info & BTUSB_INTEL_NEW) {
@@ -2673,33 +2885,53 @@
 	hdev->send   = btusb_send_frame;
 	hdev->notify = btusb_notify;
 
+	if (id->driver_info & BTUSB_BCM2045)
+		set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+
 	if (id->driver_info & BTUSB_BCM92035)
 		hdev->setup = btusb_setup_bcm92035;
 
 #ifdef CONFIG_BT_HCIBTUSB_BCM
 	if (id->driver_info & BTUSB_BCM_PATCHRAM) {
+		hdev->manufacturer = 15;
 		hdev->setup = btbcm_setup_patchram;
+		hdev->set_diag = btusb_bcm_set_diag;
 		hdev->set_bdaddr = btbcm_set_bdaddr;
+
+		/* Broadcom LM_DIAG Interface numbers are hardcoded */
+		data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
 	}
 
-	if (id->driver_info & BTUSB_BCM_APPLE)
+	if (id->driver_info & BTUSB_BCM_APPLE) {
+		hdev->manufacturer = 15;
 		hdev->setup = btbcm_setup_apple;
+		hdev->set_diag = btusb_bcm_set_diag;
+
+		/* Broadcom LM_DIAG Interface numbers are hardcoded */
+		data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
+	}
 #endif
 
 	if (id->driver_info & BTUSB_INTEL) {
+		hdev->manufacturer = 2;
 		hdev->setup = btusb_setup_intel;
 		hdev->shutdown = btusb_shutdown_intel;
+		hdev->set_diag = btintel_set_diag_mfg;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+		set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
 	}
 
 	if (id->driver_info & BTUSB_INTEL_NEW) {
+		hdev->manufacturer = 2;
 		hdev->send = btusb_send_frame_intel;
 		hdev->setup = btusb_setup_intel_new;
 		hdev->hw_error = btintel_hw_error;
+		hdev->set_diag = btintel_set_diag;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+		set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
 	}
 
 	if (id->driver_info & BTUSB_MARVELL)
@@ -2710,8 +2942,10 @@
 		set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
 	}
 
-	if (id->driver_info & BTUSB_INTEL_BOOT)
+	if (id->driver_info & BTUSB_INTEL_BOOT) {
+		hdev->manufacturer = 2;
 		set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+	}
 
 	if (id->driver_info & BTUSB_ATH3012) {
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
@@ -2740,8 +2974,8 @@
 		/* AMP controllers do not support SCO packets */
 		data->isoc = NULL;
 	} else {
-		/* Interface numbers are hardcoded in the specification */
-		data->isoc = usb_ifnum_to_if(data->udev, 1);
+		/* Interface orders are hardcoded in the specification */
+		data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
 	}
 
 	if (!reset)
@@ -2804,6 +3038,16 @@
 		}
 	}
 
+#ifdef CONFIG_BT_HCIBTUSB_BCM
+	if (data->diag) {
+		if (!usb_driver_claim_interface(&btusb_driver,
+						data->diag, data))
+			__set_diag_interface(hdev);
+		else
+			data->diag = NULL;
+	}
+#endif
+
 	err = hci_register_dev(hdev);
 	if (err < 0) {
 		hci_free_dev(hdev);
@@ -2831,12 +3075,25 @@
 	if (data->isoc)
 		usb_set_intfdata(data->isoc, NULL);
 
+	if (data->diag)
+		usb_set_intfdata(data->diag, NULL);
+
 	hci_unregister_dev(hdev);
 
-	if (intf == data->isoc)
+	if (intf == data->intf) {
+		if (data->isoc)
+			usb_driver_release_interface(&btusb_driver, data->isoc);
+		if (data->diag)
+			usb_driver_release_interface(&btusb_driver, data->diag);
+	} else if (intf == data->isoc) {
+		if (data->diag)
+			usb_driver_release_interface(&btusb_driver, data->diag);
 		usb_driver_release_interface(&btusb_driver, data->intf);
-	else if (data->isoc)
-		usb_driver_release_interface(&btusb_driver, data->isoc);
+	} else if (intf == data->diag) {
+		usb_driver_release_interface(&btusb_driver, data->intf);
+		if (data->isoc)
+			usb_driver_release_interface(&btusb_driver, data->isoc);
+	}
 
 	hci_free_dev(hdev);
 }
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 7a722df..57eb935 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -155,9 +155,6 @@
 
 	BT_DBG("%s %p", hdev->name, hdev);
 
-	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	/* provide contexts for callbacks from ST */
 	hst = hci_get_drvdata(hdev);
 
@@ -181,7 +178,6 @@
 			goto done;
 
 		if (err != -EINPROGRESS) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("st_register failed %d", err);
 			return err;
 		}
@@ -195,7 +191,6 @@
 			(&hst->wait_reg_completion,
 			 msecs_to_jiffies(BT_REGISTER_TIMEOUT));
 		if (!timeleft) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("Timeout(%d sec),didn't get reg "
 					"completion signal from ST",
 					BT_REGISTER_TIMEOUT / 1000);
@@ -205,7 +200,6 @@
 		/* Is ST registration callback
 		 * called with ERROR status? */
 		if (hst->reg_status != 0) {
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			BT_ERR("ST registration completed with invalid "
 					"status %d", hst->reg_status);
 			return -EAGAIN;
@@ -215,7 +209,6 @@
 		hst->st_write = ti_st_proto[i].write;
 		if (!hst->st_write) {
 			BT_ERR("undefined ST write function");
-			clear_bit(HCI_RUNNING, &hdev->flags);
 			for (i = 0; i < MAX_BT_CHNL_IDS; i++) {
 				/* Undo registration with ST */
 				err = st_unregister(&ti_st_proto[i]);
@@ -236,9 +229,6 @@
 	int err, i;
 	struct ti_st *hst = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) {
 		err = st_unregister(&ti_st_proto[i]);
 		if (err)
@@ -256,9 +246,6 @@
 	struct ti_st *hst;
 	long len;
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	hst = hci_get_drvdata(hdev);
 
 	/* Prepend skb with frame type */
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 84135c5..5026f66 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -357,8 +357,6 @@
 
 static int dtl1_hci_open(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &(hdev->flags));
-
 	return 0;
 }
 
@@ -376,9 +374,6 @@
 
 static int dtl1_hci_close(struct hci_dev *hdev)
 {
-	if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
-		return 0;
-
 	dtl1_hci_flush(hdev);
 
 	return 0;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 6da5e4c..d776dfd 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -243,6 +243,7 @@
 static const struct hci_uart_proto athp = {
 	.id		= HCI_UART_ATH3K,
 	.name		= "ATH3K",
+	.manufacturer	= 69,
 	.open		= ath_open,
 	.close		= ath_close,
 	.flush		= ath_flush,
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index f306541..cb852cc 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -32,6 +32,8 @@
 #include <linux/gpio/consumer.h>
 #include <linux/tty.h>
 #include <linux/interrupt.h>
+#include <linux/dmi.h>
+#include <linux/pm_runtime.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -39,6 +41,11 @@
 #include "btbcm.h"
 #include "hci_uart.h"
 
+#define BCM_LM_DIAG_PKT 0x07
+#define BCM_LM_DIAG_SIZE 63
+
+#define BCM_AUTOSUSPEND_DELAY	5000 /* default autosleep delay */
+
 struct bcm_device {
 	struct list_head	list;
 
@@ -55,7 +62,7 @@
 	int			irq;
 	u8			irq_polarity;
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 	struct hci_uart		*hu;
 	bool			is_suspended; /* suspend/resume flag */
 #endif
@@ -152,13 +159,17 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 static irqreturn_t bcm_host_wake(int irq, void *data)
 {
 	struct bcm_device *bdev = data;
 
 	bt_dev_dbg(bdev, "Host wake IRQ");
 
+	pm_runtime_get(&bdev->pdev->dev);
+	pm_runtime_mark_last_busy(&bdev->pdev->dev);
+	pm_runtime_put_autosuspend(&bdev->pdev->dev);
+
 	return IRQ_HANDLED;
 }
 
@@ -182,6 +193,12 @@
 			goto unlock;
 
 		device_init_wakeup(&bdev->pdev->dev, true);
+
+		pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+						 BCM_AUTOSUSPEND_DELAY);
+		pm_runtime_use_autosuspend(&bdev->pdev->dev);
+		pm_runtime_set_active(&bdev->pdev->dev);
+		pm_runtime_enable(&bdev->pdev->dev);
 	}
 
 unlock:
@@ -197,7 +214,7 @@
 	.bt_wake_active = 1,	/* BT_WAKE active mode: 1 = high, 0 = low */
 	.host_wake_active = 0,	/* HOST_WAKE active mode: 1 = high, 0 = low */
 	.allow_host_sleep = 1,	/* Allow host sleep in SCO flag */
-	.combine_modes = 0,	/* Combine sleep and LPM flag */
+	.combine_modes = 1,	/* Combine sleep and LPM flag */
 	.tristate_control = 0,	/* Allow tri-state control of UART tx flag */
 	/* Irrelevant USB flags */
 	.usb_auto_sleep = 0,
@@ -232,6 +249,29 @@
 static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; }
 #endif
 
+static int bcm_set_diag(struct hci_dev *hdev, bool enable)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(HCI_RUNNING, &hdev->flags))
+		return -ENETDOWN;
+
+	skb = bt_skb_alloc(3, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	*skb_put(skb, 1) = BCM_LM_DIAG_PKT;
+	*skb_put(skb, 1) = 0xf0;
+	*skb_put(skb, 1) = enable;
+
+	skb_queue_tail(&bcm->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
@@ -258,7 +298,7 @@
 		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
 			bcm->dev = dev;
 			hu->init_speed = dev->init_speed;
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 			dev->hu = hu;
 #endif
 			bcm_gpio_set_power(bcm->dev, true);
@@ -282,7 +322,10 @@
 	mutex_lock(&bcm_device_lock);
 	if (bcm_device_exists(bdev)) {
 		bcm_gpio_set_power(bdev, false);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+		pm_runtime_disable(&bdev->pdev->dev);
+		pm_runtime_set_suspended(&bdev->pdev->dev);
+
 		if (device_can_wakeup(&bdev->pdev->dev)) {
 			devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev);
 			device_init_wakeup(&bdev->pdev->dev, false);
@@ -322,6 +365,7 @@
 
 	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+	hu->hdev->set_diag = bcm_set_diag;
 	hu->hdev->set_bdaddr = btbcm_set_bdaddr;
 
 	err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name));
@@ -379,10 +423,18 @@
 	return err;
 }
 
+#define BCM_RECV_LM_DIAG \
+	.type = BCM_LM_DIAG_PKT, \
+	.hlen = BCM_LM_DIAG_SIZE, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = BCM_LM_DIAG_SIZE
+
 static const struct h4_recv_pkt bcm_recv_pkts[] = {
-	{ H4_RECV_ACL,   .recv = hci_recv_frame },
-	{ H4_RECV_SCO,   .recv = hci_recv_frame },
-	{ H4_RECV_EVENT, .recv = hci_recv_frame },
+	{ H4_RECV_ACL,      .recv = hci_recv_frame },
+	{ H4_RECV_SCO,      .recv = hci_recv_frame },
+	{ H4_RECV_EVENT,    .recv = hci_recv_frame },
+	{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag  },
 };
 
 static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -399,6 +451,15 @@
 		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
 		bcm->rx_skb = NULL;
 		return err;
+	} else if (!bcm->rx_skb) {
+		/* Delay auto-suspend when receiving completed packet */
+		mutex_lock(&bcm_device_lock);
+		if (bcm->dev && bcm_device_exists(bcm->dev)) {
+			pm_runtime_get(&bcm->dev->pdev->dev);
+			pm_runtime_mark_last_busy(&bcm->dev->pdev->dev);
+			pm_runtime_put_autosuspend(&bcm->dev->pdev->dev);
+		}
+		mutex_unlock(&bcm_device_lock);
 	}
 
 	return count;
@@ -420,10 +481,76 @@
 static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb = NULL;
+	struct bcm_device *bdev = NULL;
 
-	return skb_dequeue(&bcm->txq);
+	mutex_lock(&bcm_device_lock);
+
+	if (bcm_device_exists(bcm->dev)) {
+		bdev = bcm->dev;
+		pm_runtime_get_sync(&bdev->pdev->dev);
+		/* Shall be resumed here */
+	}
+
+	skb = skb_dequeue(&bcm->txq);
+
+	if (bdev) {
+		pm_runtime_mark_last_busy(&bdev->pdev->dev);
+		pm_runtime_put_autosuspend(&bdev->pdev->dev);
+	}
+
+	mutex_unlock(&bcm_device_lock);
+
+	return skb;
 }
 
+#ifdef CONFIG_PM
+static int bcm_suspend_device(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	bt_dev_dbg(bdev, "");
+
+	if (!bdev->is_suspended && bdev->hu) {
+		hci_uart_set_flow_control(bdev->hu, true);
+
+		/* Once this returns, driver suspends BT via GPIO */
+		bdev->is_suspended = true;
+	}
+
+	/* Suspend the device */
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, false);
+		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
+		mdelay(15);
+	}
+
+	return 0;
+}
+
+static int bcm_resume_device(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	bt_dev_dbg(bdev, "");
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		bt_dev_dbg(bdev, "resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this executes, the device has woken up already */
+	if (bdev->is_suspended && bdev->hu) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PM_SLEEP
 /* Platform suspend callback */
 static int bcm_suspend(struct device *dev)
@@ -433,24 +560,17 @@
 
 	bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
 
+	/* bcm_suspend can be called at any time as long as the platform
+	 * device is bound, so it must take bcm_device_lock to protect
+	 * access to the hci_uart and the device_wakeup GPIO.
+	 */
 	mutex_lock(&bcm_device_lock);
 
 	if (!bdev->hu)
 		goto unlock;
 
-	if (!bdev->is_suspended) {
-		hci_uart_set_flow_control(bdev->hu, true);
-
-		/* Once this callback returns, driver suspends BT via GPIO */
-		bdev->is_suspended = true;
-	}
-
-	/* Suspend the device */
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, false);
-		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
-		mdelay(15);
-	}
+	if (pm_runtime_active(dev))
+		bcm_suspend_device(dev);
 
 	if (device_may_wakeup(&bdev->pdev->dev)) {
 		error = enable_irq_wake(bdev->irq);
@@ -471,6 +591,10 @@
 
 	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
 
+	/* bcm_resume can be called at any time as long as the platform
+	 * device is bound, so it must take bcm_device_lock to protect
+	 * access to the hci_uart and the device_wakeup GPIO.
+	 */
 	mutex_lock(&bcm_device_lock);
 
 	if (!bdev->hu)
@@ -481,22 +605,15 @@
 		bt_dev_dbg(bdev, "BCM irq: disabled");
 	}
 
-	if (bdev->device_wakeup) {
-		gpiod_set_value(bdev->device_wakeup, true);
-		bt_dev_dbg(bdev, "resume, delaying 15 ms");
-		mdelay(15);
-	}
-
-	/* When this callback executes, the device has woken up already */
-	if (bdev->is_suspended) {
-		bdev->is_suspended = false;
-
-		hci_uart_set_flow_control(bdev->hu, false);
-	}
+	bcm_resume_device(dev);
 
 unlock:
 	mutex_unlock(&bcm_device_lock);
 
+	pm_runtime_disable(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
 	return 0;
 }
 #endif
@@ -513,6 +630,22 @@
 };
 
 #ifdef CONFIG_ACPI
+static u8 acpi_active_low = ACPI_ACTIVE_LOW;
+
+/* The IRQ polarity of some chipsets is not defined correctly in the ACPI table. */
+static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
+	{
+		.ident = "Asus T100TA",
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
+					"ASUSTeK COMPUTER INC."),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+		},
+		.driver_data = &acpi_active_low,
+	},
+	{ }
+};
+
 static int bcm_resource(struct acpi_resource *ares, void *data)
 {
 	struct bcm_device *dev = data;
@@ -549,15 +682,10 @@
 static int bcm_acpi_probe(struct bcm_device *dev)
 {
 	struct platform_device *pdev = dev->pdev;
-	const struct acpi_device_id *id;
-	struct acpi_device *adev;
 	LIST_HEAD(resources);
+	const struct dmi_system_id *dmi_id;
 	int ret;
 
-	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
-	if (!id)
-		return -ENODEV;
-
 	/* Retrieve GPIO data */
 	dev->name = dev_name(&pdev->dev);
 	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
@@ -602,11 +730,18 @@
 	}
 
 	/* Retrieve UART ACPI info */
-	adev = ACPI_COMPANION(&dev->pdev->dev);
-	if (!adev)
-		return 0;
+	ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev),
+				     &resources, bcm_resource, dev);
+	if (ret < 0)
+		return ret;
+	acpi_dev_free_resource_list(&resources);
 
-	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+	dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table);
+	if (dmi_id) {
+		bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low",
+			    dmi_id->ident);
+		dev->irq_polarity = *(u8 *)dmi_id->driver_data;
+	}
 
 	return 0;
 }
@@ -620,7 +755,6 @@
 static int bcm_probe(struct platform_device *pdev)
 {
 	struct bcm_device *dev;
-	struct acpi_device_id *pdata = pdev->dev.platform_data;
 	int ret;
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -629,15 +763,9 @@
 
 	dev->pdev = pdev;
 
-	if (ACPI_HANDLE(&pdev->dev)) {
-		ret = bcm_acpi_probe(dev);
-		if (ret)
-			return ret;
-	} else if (pdata) {
-		dev->name = pdata->id;
-	} else {
-		return -ENODEV;
-	}
+	ret = bcm_acpi_probe(dev);
+	if (ret)
+		return ret;
 
 	platform_set_drvdata(pdev, dev);
 
@@ -671,6 +799,7 @@
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
+	.manufacturer	= 15,
 	.init_speed	= 115200,
 	.oper_speed	= 4000000,
 	.open		= bcm_open,
@@ -693,7 +822,10 @@
 #endif
 
 /* Platform suspend and resume callbacks */
-static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+static const struct dev_pm_ops bcm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume)
+	SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL)
+};
 
 static struct platform_driver bcm_driver = {
 	.probe = bcm_probe,
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index eec3f28..a6fce48 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -266,3 +266,4 @@
 
 	return skb;
 }
+EXPORT_SYMBOL_GPL(h4_recv_buf);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index b35b238..abee221 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -128,7 +128,7 @@
 {
 	const unsigned char sync_req[] = { 0x01, 0x7e };
 	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
-	struct hci_uart *hu = (struct hci_uart *) arg;
+	struct hci_uart *hu = (struct hci_uart *)arg;
 	struct h5 *h5 = hu->priv;
 	struct sk_buff *skb;
 	unsigned long flags;
@@ -210,7 +210,7 @@
 
 	init_timer(&h5->timer);
 	h5->timer.function = h5_timed_event;
-	h5->timer.data = (unsigned long) hu;
+	h5->timer.data = (unsigned long)hu;
 
 	h5->tx_win = H5_TX_WIN_MAX;
 
@@ -453,7 +453,7 @@
 		return -ENOMEM;
 	}
 
-	h5->rx_skb->dev = (void *) hu->hdev;
+	h5->rx_skb->dev = (void *)hu->hdev;
 
 	return 0;
 }
@@ -696,7 +696,7 @@
 	}
 
 	skb = skb_dequeue(&h5->unrel);
-	if (skb != NULL) {
+	if (skb) {
 		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
 				      skb->data, skb->len);
 		if (nskb) {
@@ -714,7 +714,7 @@
 		goto unlock;
 
 	skb = skb_dequeue(&h5->rel);
-	if (skb != NULL) {
+	if (skb) {
 		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
 				      skb->data, skb->len);
 		if (nskb) {
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 49e2540..4a414a5 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -557,6 +557,7 @@
 
 	bt_dev_dbg(hdev, "start intel_setup");
 
+	hu->hdev->set_diag = btintel_set_diag;
 	hu->hdev->set_bdaddr = btintel_set_bdaddr;
 
 	calltime = ktime_get();
@@ -1147,6 +1148,7 @@
 static const struct hci_uart_proto intel_proto = {
 	.id		= HCI_UART_INTEL,
 	.name		= "Intel",
+	.manufacturer	= 2,
 	.init_speed	= 115200,
 	.oper_speed	= 3000000,
 	.open		= intel_open,
@@ -1165,22 +1167,6 @@
 	{ },
 };
 MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
-
-static int intel_acpi_probe(struct intel_device *idev)
-{
-	const struct acpi_device_id *id;
-
-	id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
-	if (!id)
-		return -ENODEV;
-
-	return 0;
-}
-#else
-static int intel_acpi_probe(struct intel_device *idev)
-{
-	return -ENODEV;
-}
 #endif
 
 #ifdef CONFIG_PM
@@ -1248,14 +1234,6 @@
 
 	idev->pdev = pdev;
 
-	if (ACPI_HANDLE(&pdev->dev)) {
-		int err = intel_acpi_probe(idev);
-		if (err)
-			return err;
-	} else {
-		return -ENODEV;
-	}
-
 	idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
 					      GPIOD_OUT_LOW);
 	if (IS_ERR(idev->reset)) {
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 0d5a05a..96bcec5 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -208,9 +208,6 @@
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	/* Nothing to do for UART driver */
-
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
@@ -241,9 +238,6 @@
 {
 	BT_DBG("hdev %p", hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	hci_uart_flush(hdev);
 	hdev->flush = NULL;
 	return 0;
@@ -254,9 +248,6 @@
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
 	hu->proto->enqueue(hu, skb);
@@ -470,8 +461,6 @@
 	INIT_WORK(&hu->init_ready, hci_uart_init_work);
 	INIT_WORK(&hu->write_work, hci_uart_write_work);
 
-	spin_lock_init(&hu->rx_lock);
-
 	/* Flush any pending characters in the driver and line discipline. */
 
 	/* FIXME: why is this needed. Note don't use ldisc_ref here as the
@@ -569,14 +558,14 @@
 	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
 		return;
 
-	spin_lock(&hu->rx_lock);
+	/* No extra locking is needed here: the tty layer already
+	 * serializes calls into this path with its own mutex.
+	 */
 	hu->proto->recv(hu, data, count);
 
 	if (hu->hdev)
 		hu->hdev->stat.byte_rx += count;
 
-	spin_unlock(&hu->rx_lock);
-
 	tty_unthrottle(tty);
 }
 
@@ -598,6 +587,13 @@
 	hdev->bus = HCI_UART;
 	hci_set_drvdata(hdev, hu);
 
+	/* Only consider the manufacturer information valid when a vendor
+	 * specific setup callback is provided. This avoids filling in the
+	 * value for Ericsson when nothing is specified.
+	 */
+	if (hu->proto->setup)
+		hdev->manufacturer = hu->proto->manufacturer;
+
 	hdev->open  = hci_uart_open;
 	hdev->close = hci_uart_close;
 	hdev->flush = hci_uart_flush;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 21f4ea4..77eae64 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -347,7 +347,7 @@
 	struct hci_uart *hu = (struct hci_uart *)arg;
 	struct qca_data *qca = hu->priv;
 	unsigned long flags, retrans_delay;
-	unsigned long retransmit = 0;
+	bool retransmit = false;
 
 	BT_DBG("hu %p wake retransmit timeout in %d state",
 		hu, qca->tx_ibs_state);
@@ -358,7 +358,7 @@
 	switch (qca->tx_ibs_state) {
 	case HCI_IBS_TX_WAKING:
 		/* No WAKE_ACK, retransmit WAKE */
-		retransmit = 1;
+		retransmit = true;
 		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
 			BT_ERR("Failed to acknowledge device wake up");
 			break;
@@ -947,6 +947,7 @@
 static struct hci_uart_proto qca_proto = {
 	.id		= HCI_UART_QCA,
 	.name		= "QCA",
+	.manufacturer	= 29,
 	.init_speed	= 115200,
 	.oper_speed	= 3000000,
 	.open		= qca_open,
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 495b9ef..82c92f1 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -59,6 +59,7 @@
 struct hci_uart_proto {
 	unsigned int id;
 	const char *name;
+	unsigned int manufacturer;
 	unsigned int init_speed;
 	unsigned int oper_speed;
 	int (*open)(struct hci_uart *hu);
@@ -85,7 +86,6 @@
 
 	struct sk_buff		*tx_skb;
 	unsigned long		tx_state;
-	spinlock_t		rx_lock;
 
 	unsigned int init_speed;
 	unsigned int oper_speed;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 78653db..ed888e3 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -55,8 +55,6 @@
 
 static int vhci_open_dev(struct hci_dev *hdev)
 {
-	set_bit(HCI_RUNNING, &hdev->flags);
-
 	return 0;
 }
 
@@ -64,9 +62,6 @@
 {
 	struct vhci_data *data = hci_get_drvdata(hdev);
 
-	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
-		return 0;
-
 	skb_queue_purge(&data->readq);
 
 	return 0;
@@ -85,9 +80,6 @@
 {
 	struct vhci_data *data = hci_get_drvdata(hdev);
 
-	if (!test_bit(HCI_RUNNING, &hdev->flags))
-		return -EBUSY;
-
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
 	skb_queue_tail(&data->readq, skb);
 
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1a82f3a..0ebca8b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -36,7 +36,6 @@
 
 config ARM_CCI500_PMU
 	bool "ARM CCI500 PMU support"
-	default y
 	depends on (ARM && CPU_V7) || ARM64
 	depends on PERF_EVENTS
 	select ARM_CCI_PMU
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf75..3c77645 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@
 	if (IS_ERR(ctx->csr_base))
 		return PTR_ERR(ctx->csr_base);
 
-	ctx->irq = platform_get_irq(pdev, 0);
-	if (ctx->irq < 0) {
+	rc = platform_get_irq(pdev, 0);
+	if (rc < 0) {
 		dev_err(&pdev->dev, "No IRQ resource\n");
-		return ctx->irq;
+		return rc;
 	}
+	ctx->irq = rc;
 
 	dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
 		ctx->csr_base, ctx->irq);
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 5837eb8..85da8b9 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -197,6 +197,7 @@
 	for_each_node_by_type(dn, "cpu") {
 		struct clk_init_data init;
 		struct clk *clk;
+		struct clk *parent_clk;
 		char *clk_name = kzalloc(5, GFP_KERNEL);
 		int cpu, err;
 
@@ -208,8 +209,9 @@
 			goto bail_out;
 
 		sprintf(clk_name, "cpu%d", cpu);
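+		/* Resolve the parent clk so its registered name is used */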
+		parent_clk = of_clk_get(node, 0);
 
-		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
+		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
 		cpuclk[cpu].clk_name = clk_name;
 		cpuclk[cpu].cpu = cpu;
 		cpuclk[cpu].reg_base = clock_complex_base;
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 7c1e1f5..2fe37f7 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -164,7 +164,7 @@
 	 * the values for DIV_COPY and DIV_HPM dividers need not be set.
 	 */
 	div0 = cfg_data->div0;
-	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
 		div1 = cfg_data->div1;
 		if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
 			div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@
 		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
 		WARN_ON(alt_div >= MAX_DIV);
 
-		if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+		if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 			/*
 			 * In Exynos4210, ATB clock parent is also mout_core. So
 			 * ATB clock also needs to be mantained at safe speed.
@@ -206,7 +206,7 @@
 	writel(div0, base + E4210_DIV_CPU0);
 	wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
 
-	if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
 		writel(div1, base + E4210_DIV_CPU1);
 		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
 				DIV_MASK_ALL);
@@ -225,7 +225,7 @@
 	unsigned long mux_reg;
 
 	/* find out the divider values to use for clock data */
-	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 		while ((cfg_data->prate * 1000) != ndata->new_rate) {
 			if (cfg_data->prate == 0)
 				return -EINVAL;
@@ -240,7 +240,7 @@
 	writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
 	wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
 
-	if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+	if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
 		div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
 		div_mask |= E4210_DIV0_ATB_MASK;
 	}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 676ee8f..8831e1a 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -374,7 +374,6 @@
 	DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
 	DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
 	DT_CLK(NULL, "uart3_ick", "uart3_ick"),
-	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
 	DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
 	DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
 	DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@
 static struct ti_dt_clk omap36xx_clks[] = {
 	DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
 	DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
 	{ .node_name = NULL },
 };
 
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9b5b289..a911d7d 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -18,7 +18,6 @@
 
 #include "clock.h"
 
-#define DRA7_DPLL_ABE_DEFFREQ				180633600
 #define DRA7_DPLL_GMAC_DEFFREQ				1000000000
 #define DRA7_DPLL_USB_DEFFREQ				960000000
 
@@ -313,27 +312,12 @@
 int __init dra7xx_dt_clk_init(void)
 {
 	int rc;
-	struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
+	struct clk *dpll_ck, *hdcp_ck;
 
 	ti_dt_clocks_register(dra7xx_clks);
 
 	omap2_clk_disable_autoidle_all();
 
-	abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
-	sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
-	dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
-
-	rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
-	if (!rc)
-		rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
-	if (rc)
-		pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
-	dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
-	rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
-	if (rc)
-		pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
-
 	dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
 	rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
 	if (rc)
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
index 90d7d8a..1ddc288 100644
--- a/drivers/clk/ti/clkt_dflt.c
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -222,7 +222,7 @@
 		}
 	}
 
-	if (unlikely(!clk->enable_reg)) {
+	if (unlikely(IS_ERR(clk->enable_reg))) {
 		pr_err("%s: %s missing enable_reg\n", __func__,
 		       clk_hw_get_name(hw));
 		ret = -EINVAL;
@@ -264,7 +264,7 @@
 	u32 v;
 
 	clk = to_clk_hw_omap(hw);
-	if (!clk->enable_reg) {
+	if (IS_ERR(clk->enable_reg)) {
 		/*
 		 * 'independent' here refers to a clock which is not
 		 * controlled by its parent.
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b0..d3c1742 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@
 	bc_timer.freq = clk_get_rate(timer_clk);
 
 	irq = irq_of_parse_and_map(np, 0);
-	if (irq == NO_IRQ) {
+	if (!irq) {
 		pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
 		return;
 	}
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf39..1cea08c 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@
 	int irq, error;
 
 	irq  = irq_of_parse_and_map(np, 0);
-	if (irq == NO_IRQ) {
+	if (!irq) {
 		pr_err("%s: failed to map interrupts\n", __func__);
 		return;
 	}
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 7982772..cec1ee2 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -149,6 +149,9 @@
 {
 	struct acpi_cpufreq_data *data = policy->driver_data;
 
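+	/* driver_data may already be cleared while the sysfs file is
+	 * still reachable; avoid a NULL pointer dereference.
+	 */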
+	if (unlikely(!data))
+		return -ENODEV;
+
 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ef5ed94..25c4c15 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1436,8 +1436,10 @@
 	 * since this is a core component, and is essential for the
 	 * subsequent light-weight ->init() to succeed.
 	 */
-	if (cpufreq_driver->exit)
+	if (cpufreq_driver->exit) {
 		cpufreq_driver->exit(policy);
+		policy->freq_table = NULL;
+	}
 }
 
 /**
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3af9dd7..aa33b92 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -776,6 +776,11 @@
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
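+	/* An unchanged MPERF gives a zero delta; bail out to avoid
+	 * a division by zero in the sample calculations.
+	 */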
+	if (cpu->prev_mperf == mperf) {
+		local_irq_restore(flags);
+		return;
+	}
+
 	tsc = rdtsc();
 	local_irq_restore(flags);
 
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b..bc2a55b 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@
 
 int mv_cesa_queue_req(struct crypto_async_request *req);
 
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+					    int ret)
+{
+	/*
+	 * The queue still had some space, so the request was queued
+	 * normally and there's no need to clean it up.
+	 */
+	if (ret == -EINPROGRESS)
+		return false;
+
+	/*
+	 * The queue had no space left, but since the request is
+	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+	 * the backlog and will be processed later. There's no need to
+	 * clean it up.
+	 */
+	if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+		return false;
+
+	/* Request wasn't queued, we need to clean it up */
+	return true;
+}
+
 /* TDMA functions */
 
 static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3..3df2f4e 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@
 {
 	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
 	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
 	creq->req.base.engine = engine;
 
 	if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -551,7 +550,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
@@ -693,7 +692,7 @@
 		return ret;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ablkcipher_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272e..e8d0d71 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS) {
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
-		return ret;
-	}
 
 	return ret;
 }
@@ -766,7 +764,7 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
@@ -791,7 +789,7 @@
 		return 0;
 
 	ret = mv_cesa_queue_req(&req->base);
-	if (ret && ret != -EINPROGRESS)
+	if (mv_cesa_req_needs_cleanup(&req->base, ret))
 		mv_cesa_ahash_cleanup(req);
 
 	return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b419..0a5ca0b 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@
 	struct pci_dev *parent = pdev->bus->self;
 	uint16_t bridge_ctl = 0;
 
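+	/* Only the physical function may reset the device */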
+	if (accel_dev->is_vf)
+		return;
+
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 		 accel_dev->accel_id);
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3927ed9..ca848cc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -492,7 +492,7 @@
 	if (err) {
 		put_device(&devfreq->dev);
 		mutex_unlock(&devfreq->lock);
-		goto err_dev;
+		goto err_out;
 	}
 
 	mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@
 err_init:
 	list_del(&devfreq->node);
 	device_unregister(&devfreq->dev);
-err_dev:
 	kfree(devfreq);
 err_out:
 	return ERR_PTR(err);
@@ -795,8 +794,10 @@
 		ret = PTR_ERR(governor);
 		goto out;
 	}
-	if (df->governor == governor)
+	if (df->governor == governor) {
+		ret = 0;
 		goto out;
+	}
 
 	if (df->governor) {
 		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4b..dd24375 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@
 	return desc;
 }
 
+static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+	memset(&desc->lld, 0, sizeof(desc->lld));
+	INIT_LIST_HEAD(&desc->descs_list);
+	desc->direction = DMA_TRANS_NONE;
+	desc->xfer_size = 0;
+	desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@
 		desc = list_first_entry(&atchan->free_descs_list,
 					struct at_xdmac_desc, desc_node);
 		list_del(&desc->desc_node);
-		desc->active_xfer = false;
+		at_xdmac_init_used_desc(desc);
 	}
 
 	return desc;
@@ -875,14 +884,14 @@
 
 	if (xt->src_inc) {
 		if (xt->src_sgl)
-			chan_cc |=  AT_XDMAC_CC_SAM_UBS_DS_AM;
+			chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
 		else
 			chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
 	}
 
 	if (xt->dst_inc) {
 		if (xt->dst_sgl)
-			chan_cc |=  AT_XDMAC_CC_DAM_UBS_DS_AM;
+			chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
 		else
 			chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
 	}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c..09479d4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
+		struct dma_device *device = chan->device;
+
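+		/* Mark the device DMA_PRIVATE so the channel cannot be
+		 * handed out elsewhere; undone below if the get fails.
+		 */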
+		dma_cap_set(DMA_PRIVATE, device->cap_mask);
+		device->privatecnt++;
 		err = dma_chan_get(chan);
-		if (err)
+		if (err) {
 			pr_debug("%s: failed to get %s: (%d)\n",
 				__func__, dma_chan_name(chan), err);
+			chan = NULL;
+			if (--device->privatecnt == 0)
+				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+		}
 	} else
 		chan = NULL;
 
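
The dma_get_slave_channel() change above keeps two invariants in lockstep: DMA_PRIVATE is set on the device while a private channel request is in flight, and privatecnt counts outstanding requests so the capability can be cleared once the last one goes away. A reduced model of that balance, with all names hypothetical:

#include <stdbool.h>

struct dev_model {
	int privatecnt;		/* outstanding private requests */
	bool private_cap;	/* models DMA_PRIVATE in cap_mask */
};

/* try_get_ret models the result of dma_chan_get(): 0 or -errno. */
static int get_private_channel(struct dev_model *dev, int try_get_ret)
{
	/* mark the device private and count this request first */
	dev->private_cap = true;
	dev->privatecnt++;

	if (try_get_ret) {
		/* failure path: undo exactly the accounting above */
		if (--dev->privatecnt == 0)
			dev->private_cap = false;
	}
	return try_get_ret;
}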
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87f..bedce03 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@
 	INIT_LIST_HEAD(&dw->dma.channels);
 	for (i = 0; i < nr_channels; i++) {
 		struct dw_dma_chan	*dwc = &dw->chan[i];
-		int			r = nr_channels - i - 1;
 
 		dwc->chan.device = &dw->dma;
 		dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@
 
 		/* 7 is highest priority & 0 is lowest. */
 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-			dwc->priority = r;
+			dwc->priority = nr_channels - i - 1;
 		else
 			dwc->priority = i;
 
@@ -1622,6 +1621,7 @@
 		/* Hardware configuration */
 		if (autocfg) {
 			unsigned int dwc_params;
+			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 			void __iomem *addr = chip->regs + r * sizeof(u32);
 
 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
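
The split above matters because the two uses of the old r index scale differently: channel priority is relative to the nr_channels this instance actually exposes, while the DWC_PARAMS words sit at fixed offsets counted down from the top of a parameter block sized for the hardware maximum. A sketch of the address computation, with the maximum assumed at 8 (standing in for DW_DMA_MAX_NR_CHANNELS):

#include <stdint.h>

#define MAX_NR_CHANNELS 8	/* assumed DW_DMA_MAX_NR_CHANNELS */

/*
 * Per-channel parameter words are laid out top-down in a block sized
 * for the hardware maximum, independent of how many channels the
 * current instance enables.
 */
static uintptr_t dwc_params_addr(uintptr_t regs, unsigned int chan)
{
	unsigned int r = MAX_NR_CHANNELS - chan - 1;

	return regs + r * sizeof(uint32_t);
}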
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1..48d6d9e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@
 	struct idma64_desc *desc = idma64c->desc;
 	struct idma64_hw_desc *hw;
 	size_t bytes = desc->length;
-	u64 llp;
-	u32 ctlhi;
+	u64 llp = channel_readq(idma64c, LLP);
+	u32 ctlhi = channel_readl(idma64c, CTL_HI);
 	unsigned int i = 0;
 
-	llp = channel_readq(idma64c, LLP);
 	do {
 		hw = &desc->hw[i];
-	} while ((hw->llp != llp) && (++i < desc->ndesc));
+		if (hw->llp == llp)
+			break;
+		bytes -= hw->len;
+	} while (++i < desc->ndesc);
 
 	if (!i)
 		return bytes;
 
-	do {
-		bytes -= desc->hw[--i].len;
-	} while (i);
+	/* The current chunk is not fully transferred yet */
+	bytes += desc->hw[--i].len;
 
-	ctlhi = channel_readl(idma64c, CTL_HI);
 	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
 
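
The rewritten walk subtracts every chunk that finished while scanning for the chunk whose link pointer matches the LLP register, then adds the in-flight chunk back and discounts only the progress the CTL_HI block-size field reports. A standalone model of the arithmetic, with the register reads replaced by plain parameters and at least one chunk assumed:

#include <stddef.h>
#include <stdint.h>

struct chunk {
	uint64_t llp;	/* link pointer identifying this chunk */
	size_t len;	/* chunk length in bytes */
};

static size_t residue(const struct chunk *c, unsigned int ndesc,
		      size_t total, uint64_t hw_llp, size_t done_in_cur)
{
	size_t bytes = total;
	unsigned int i = 0;

	do {
		if (c[i].llp == hw_llp)
			break;
		bytes -= c[i].len;	/* chunk fully transferred */
	} while (++i < ndesc);

	if (!i)
		return bytes;		/* nothing consumed yet */

	/*
	 * The chunk before the match was subtracted above but is still
	 * in flight: add it back, then discount only the bytes the
	 * hardware reports as done.
	 */
	bytes += c[--i].len;
	return bytes - done_in_cur;
}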
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce..fc4156a 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@
 		return;
 
 	/* clear the channel mapping in DRCMR */
-	reg = pxad_drcmr(chan->drcmr);
-	writel_relaxed(0, chan->phy->base + reg);
+	if (chan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(chan->drcmr);
+		writel_relaxed(0, chan->phy->base + reg);
+	}
 
 	spin_lock_irqsave(&pdev->phy_lock, flags);
 	for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@
 		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
 		phy, phy->idx, misaligned);
 
-	reg = pxad_drcmr(phy->vchan->drcmr);
-	writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+		reg = pxad_drcmr(phy->vchan->drcmr);
+		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+	}
 
 	dalgn = phy_readl_relaxed(phy, DALGN);
 	if (misaligned)
@@ -887,6 +891,7 @@
 	struct dma_async_tx_descriptor *tx;
 	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+	INIT_LIST_HEAD(&vd->node);
 	tx = vchan_tx_prep(vc, vd, tx_flags);
 	tx->tx_submit = pxad_tx_submit;
 	dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
-		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+		*dcmd |= PXA_DCMD_INCTRGADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWSRC;
 	}
 	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
-		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+		*dcmd |= PXA_DCMD_INCSRCADDR;
+		if (chan->drcmr <= DRCMR_CHLNUM)
+			*dcmd |= PXA_DCMD_FLOWTRG;
 	}
 	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@
 	else
 		curr = phy_readl_relaxed(chan->phy, DTADR);
 
+	/*
+	 * curr must actually be read before checking descriptor
+	 * completion, so that a curr value inside a status updater
+	 * descriptor implies the following test returns true, and
+	 * to prevent reordering of the curr load and the test.
+	 */
+	rmb();
+	if (is_desc_completed(vd))
+		goto out;
+
 	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
 		hw_desc = sw_desc->hw_desc[i];
 		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
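
The rmb() above orders two reads: the channel's current-address register must be observed before the descriptor words are inspected, otherwise the CPU could pair a completed descriptor with a stale curr and misreport the residue. A minimal illustration of the pattern, using a GCC/Clang acquire fence as a stand-in for the kernel's rmb() and entirely hypothetical types:

#include <stdbool.h>
#include <stdint.h>

struct desc_model {
	volatile uint32_t status;	/* written last by the hardware */
};

static bool read_then_check(volatile uint32_t *curr_reg,
			    struct desc_model *vd, uint32_t *curr)
{
	*curr = *curr_reg;	/* the curr load comes first... */

	/*
	 * ...and must complete before the completion test below;
	 * this fence plays the role of the kernel's rmb().
	 */
	__atomic_thread_fence(__ATOMIC_ACQUIRE);

	return vd->status != 0;	/* stand-in for is_desc_completed() */
}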
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d..1661d518 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
 	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_promise *promise, *tmp;
 
 	/* Free all the demands and completed demands */
-	list_for_each_entry(promise, &contract->demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
 		kfree(promise);
 
-	list_for_each_entry(promise, &contract->completed_demands, list)
+	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
 		kfree(promise);
 
 	kfree(contract);
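
list_for_each_entry_safe() is required above because the loop body frees the node it stands on; the plain iterator would read the next pointer out of freed memory to advance. The same discipline on a hand-rolled singly linked list:

#include <stdlib.h>

struct node {
	struct node *next;
};

/*
 * Capture the successor before releasing the current node, which is
 * exactly what the _safe list iterators do with their tmp cursor.
 */
static void free_all(struct node *head)
{
	struct node *n = head, *tmp;

	while (n) {
		tmp = n->next;	/* read next before freeing n */
		free(n);
		n = tmp;
	}
}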
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d5..8d57b1b 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)		(((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET		0x2C
@@ -379,14 +378,6 @@
 	return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-	u32 __iomem *cmd_base = ring->cmd_base;
-	u32 ring_state = ioread32(&cmd_base[1]);
-
-	return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 				     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@
 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-				   struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+				    struct xgene_dma_desc_sw *desc_sw)
 {
+	struct xgene_dma_ring *ring = &chan->tx_ring;
 	struct xgene_dma_desc_hw *desc_hw;
 
-	/* Check if can push more descriptor to hw for execution */
-	if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-		return -EBUSY;
-
 	/* Get hw descriptor from DMA tx ring */
 	desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@
 		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
 	}
 
+	/* Increment the pending transaction count */
+	chan->pending += ((desc_sw->flags &
+			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
 	/* Notify the hw that we have descriptor ready for execution */
 	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
 		  2 : 1, ring->cmd);
-
-	return 0;
 }
 
 /**
@@ -710,7 +700,6 @@
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-	int ret;
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@
 		if (chan->pending >= chan->max_outstanding)
 			return;
 
-		ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-		if (ret)
-			return;
+		xgene_chan_xfer_request(chan, desc_sw);
 
 		/*
 		 * Delete this element from ld pending queue and append it to
 		 * ld running queue
 		 */
 		list_move_tail(&desc_sw->node, &chan->ld_running);
-
-		/* Increment the pending transaction count */
-		chan->pending++;
 	}
 }
 
@@ -821,7 +805,8 @@
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
-		chan->pending--;
+		chan->pending -= ((desc_sw->flags &
+				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
 		/*
 		 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@
 				     struct xgene_dma_ring *ring,
 				     enum xgene_dma_ring_cfgsize cfgsize)
 {
+	int ret;
+
 	/* Setup DMA ring descriptor variables */
 	ring->pdma = chan->pdma;
 	ring->cfgsize = cfgsize;
 	ring->num = chan->pdma->ring_num++;
 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-	ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-	if (ring->size <= 0)
-		return ring->size;
+	ret = xgene_dma_get_ring_size(chan, cfgsize);
+	if (ret <= 0)
+		return ret;
+	ring->size = ret;
 
 	/* Allocate memory for DMA ring descriptor */
 	ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@
 		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
 	/* Set the max outstanding request possible to this channel */
-	chan->max_outstanding = rx_ring->slots;
+	chan->max_outstanding = tx_ring->slots;
 
 	return ret;
 }
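
With the -EBUSY ring probe gone, flow control in this driver rests entirely on chan->pending against chan->max_outstanding, so the count must charge what is actually consumed: a 64B descriptor occupies two tx-ring slots, and max_outstanding has to be sized from the tx ring it throttles rather than the rx ring. The accounting in isolation, with hypothetical names:

#include <stdbool.h>

struct chan_model {
	int pending;		/* ring slots currently in flight */
	int max_outstanding;	/* tx ring capacity */
};

/* Submit one descriptor; wide (64B) descriptors take two slots. */
static bool submit(struct chan_model *c, bool is_64b)
{
	int slots = is_64b ? 2 : 1;

	if (c->pending >= c->max_outstanding)
		return false;	/* caller retries later */

	c->pending += slots;
	return true;
}

/* The completion path must release exactly what submit() charged. */
static void complete(struct chan_model *c, bool is_64b)
{
	c->pending -= is_64b ? 2 : 1;
}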
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6..c017fcd 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@
 	struct dma_chan *chan;
 	struct zx_dma_chan *c;
 
-	if (request > d->dma_requests)
+	if (request >= d->dma_requests)
 		return NULL;
 
 	chan = dma_get_any_slave_channel(&d->slave);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addd..8dd0af1 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@
 static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
 {
 	if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
-		*attached = new ? true : false;
+		*attached = ((new >> idx) & 0x1) ? true : false;
 		return true;
 	}
 
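
The extcon fix matters because new is the whole cable-state bitmap: testing it as a boolean reports whether any cable is attached, while the function must report the state of the single cable at bit idx. A compact demonstration of the difference:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool changed(uint32_t prev, uint32_t next, int idx, bool *attached)
{
	if (((prev >> idx) & 0x1) != ((next >> idx) & 0x1)) {
		*attached = (next >> idx) & 0x1;	/* fixed form */
		return true;
	}
	return false;
}

int main(void)
{
	bool attached;

	/*
	 * Bit 1 goes 1 -> 0 while bit 0 stays set: the old code would
	 * have reported attached = true just because next != 0.
	 */
	if (changed(0x3, 0x1, 1, &attached))
		printf("cable 1 %s\n", attached ? "attached" : "detached");
	return 0;
}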
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8..665efca 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@
 	bool
 	depends on ARM || ARM64
 
+config QCOM_SCM_32
+	def_bool y
+	depends on QCOM_SCM && ARM
+
+config QCOM_SCM_64
+	def_bool y
+	depends on QCOM_SCM && ARM64
+
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830f..2ee8347 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@
 obj-$(CONFIG_ISCSI_IBFT)	+= iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm-32.o
+obj-$(CONFIG_QCOM_SCM_64)	+= qcom_scm-64.o
+obj-$(CONFIG_QCOM_SCM_32)	+= qcom_scm-32.o
 CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
 
 obj-y				+= broadcom/
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e..950c87f 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/efi.h>
+#include <linux/sort.h>
 #include <asm/efi.h>
 
 #include "efistub.h"
@@ -305,6 +306,44 @@
  */
 #define EFI_RT_VIRTUAL_BASE	0x40000000
 
+static int cmp_mem_desc(const void *l, const void *r)
+{
+	const efi_memory_desc_t *left = l, *right = r;
+
+	return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+				 efi_memory_desc_t *right)
+{
+	u64 left_end;
+
+	if (left == NULL || right == NULL)
+		return false;
+
+	left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+	return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+						      efi_memory_desc_t *right)
+{
+	static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+					 EFI_MEMORY_WC | EFI_MEMORY_UC |
+					 EFI_MEMORY_RUNTIME;
+
+	return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
 /*
  * efi_get_virtmap() - create a virtual mapping for the EFI memory map
  *
@@ -317,33 +356,52 @@
 		     int *count)
 {
 	u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
-	efi_memory_desc_t *out = runtime_map;
+	efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
 	int l;
 
-	for (l = 0; l < map_size; l += desc_size) {
-		efi_memory_desc_t *in = (void *)memory_map + l;
+	/*
+	 * To work around potential issues with the Properties Table feature
+	 * introduced in UEFI 2.5, which may split PE/COFF executable images
+	 * in memory into several RuntimeServicesCode and RuntimeServicesData
+	 * regions, we need to preserve the relative offsets between adjacent
+	 * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+	 * The easiest way to find adjacent regions is to sort the memory map
+	 * before traversing it.
+	 */
+	sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+	for (l = 0; l < map_size; l += desc_size, prev = in) {
 		u64 paddr, size;
 
+		in = (void *)memory_map + l;
 		if (!(in->attribute & EFI_MEMORY_RUNTIME))
 			continue;
 
+		paddr = in->phys_addr;
+		size = in->num_pages * EFI_PAGE_SIZE;
+
 		/*
 		 * Make the mapping compatible with 64k pages: this allows
 		 * a 4k page size kernel to kexec a 64k page size kernel and
 		 * vice versa.
 		 */
-		paddr = round_down(in->phys_addr, SZ_64K);
-		size = round_up(in->num_pages * EFI_PAGE_SIZE +
-				in->phys_addr - paddr, SZ_64K);
+		if (!regions_are_adjacent(prev, in) ||
+		    !regions_have_compatible_memory_type_attrs(prev, in)) {
 
-		/*
-		 * Avoid wasting memory on PTEs by choosing a virtual base that
-		 * is compatible with section mappings if this region has the
-		 * appropriate size and physical alignment. (Sections are 2 MB
-		 * on 4k granule kernels)
-		 */
-		if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
-			efi_virt_base = round_up(efi_virt_base, SZ_2M);
+			paddr = round_down(in->phys_addr, SZ_64K);
+			size += in->phys_addr - paddr;
+
+			/*
+			 * Avoid wasting memory on PTEs by choosing a virtual
+			 * base that is compatible with section mappings if this
+			 * region has the appropriate size and physical
+			 * alignment. (Sections are 2 MB on 4k granule kernels)
+			 */
+			if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+				efi_virt_base = round_up(efi_virt_base, SZ_2M);
+			else
+				efi_virt_base = round_up(efi_virt_base, SZ_64K);
+		}
 
 		in->virt_addr = efi_virt_base + in->phys_addr - paddr;
 		efi_virt_base += size;
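
Sorting the map first is what makes the adjacency test meaningful: regions split out of a single PE/COFF image are only recognised as neighbours if they appear in address order. A host-side sketch of the sort-then-check flow, with the descriptors pared down to the two fields the test needs:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SZ 4096ULL		/* assumed EFI_PAGE_SIZE */

struct mdesc {
	uint64_t phys_addr;
	uint64_t num_pages;
};

static int cmp_mdesc(const void *l, const void *r)
{
	const struct mdesc *a = l, *b = r;

	return (a->phys_addr > b->phys_addr) ? 1 : -1;
}

/* true iff left ends exactly where right starts */
static bool adjacent(const struct mdesc *left, const struct mdesc *right)
{
	if (!left || !right)
		return false;
	return left->phys_addr + left->num_pages * PAGE_SZ ==
	       right->phys_addr;
}

int main(void)
{
	struct mdesc map[] = {
		{ 0x42000000, 4 },	/* e.g. RuntimeServicesData */
		{ 0x40000000, 8192 },	/* e.g. RuntimeServicesCode */
	};

	qsort(map, 2, sizeof(map[0]), cmp_mdesc);

	/* after sorting, the split regions become neighbours */
	return adjacent(&map[0], &map[1]) ? 0 : 1;
}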
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 0000000..bb6555f
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/qcom_scm.h>
+
+/**
+ * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	return -ENOTSUPP;
+}
+
+/**
+ * __qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags: Flags to flush cache
+ *
+ * This is an end point to power down the cpu. If there was a pending
+ * interrupt, control returns from this function; otherwise, the cpu jumps
+ * to the warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+}
+
+int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	return -ENOTSUPP;
+}
+
+int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	return -ENOTSUPP;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 77f1d7c..9416e0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -672,8 +672,12 @@
 		/* disp clock */
 		adev->clock.default_dispclk =
 			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-		if (adev->clock.default_dispclk == 0)
-			adev->clock.default_dispclk = 54000; /* 540 Mhz */
+		/* set a reasonable default for DP */
+		if (adev->clock.default_dispclk < 53900) {
+			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 60000;
+		}
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		adev->clock.current_dispclk = adev->clock.default_dispclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 1c3fc99..8e99514 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -208,44 +208,6 @@
 	return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-				     cgs_handle_t *handle)
-{
-	CGS_FUNC_ADEV;
-	int r;
-	uint32_t dma_handle;
-	struct drm_gem_object *obj;
-	struct amdgpu_bo *bo;
-	struct drm_device *dev = adev->ddev;
-	struct drm_file *file_priv = NULL, *priv;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(priv, &dev->filelist, lhead) {
-		rcu_read_lock();
-		if (priv->pid == get_pid(task_pid(current)))
-			file_priv = priv;
-		rcu_read_unlock();
-		if (file_priv)
-			break;
-	}
-	mutex_unlock(&dev->struct_mutex);
-	r = dev->driver->prime_fd_to_handle(dev,
-					    file_priv, dmabuf_fd,
-					    &dma_handle);
-	spin_lock(&file_priv->table_lock);
-
-	/* Check if we currently have a reference on the object */
-	obj = idr_find(&file_priv->object_idr, dma_handle);
-	if (obj == NULL) {
-		spin_unlock(&file_priv->table_lock);
-		return -EINVAL;
-	}
-	spin_unlock(&file_priv->table_lock);
-	bo = gem_to_amdgpu_bo(obj);
-	*handle = (cgs_handle_t)bo;
-	return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-	amdgpu_cgs_import_gpu_mem,
 	amdgpu_cgs_add_irq_source,
 	amdgpu_cgs_irq_get,
 	amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 749420f..fd16652 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -156,7 +156,8 @@
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	unsigned size, i;
+	unsigned size;
+	int i;
 	int ret;
 
 	if (cs->in.num_chunks == 0)
@@ -176,7 +177,7 @@
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
 		ret = -EFAULT;
@@ -196,7 +197,7 @@
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)chunk_array[i];
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
 			ret = -EFAULT;
@@ -207,7 +208,7 @@
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)user_chunk.chunk_data;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
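
The repeated (unsigned long) in the casts above is the standard idiom for turning a fixed-width u64 ioctl field back into a pointer: the struct keeps one layout for 32- and 64-bit userspace, and on a 32-bit kernel a direct cast from u64 to a pointer draws a truncation warning. Going through the pointer-sized integer makes the narrowing explicit:

#include <stdint.h>

/*
 * A userspace address arrives as a fixed-width integer so the ioctl
 * struct has a single layout regardless of userspace word size.
 */
static void *u64_to_ptr(uint64_t v)
{
	/*
	 * Cast through unsigned long (pointer-sized in the kernel's
	 * model) first; a direct (void *)v triggers
	 * -Wint-to-pointer-cast on 32-bit builds.
	 */
	return (void *)(unsigned long)v;
}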
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e3d7077..dc29ed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -85,8 +85,6 @@
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index adb4835..b190c2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -242,11 +242,11 @@
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 8a122b1..96290d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -402,3 +402,19 @@
 		return true;
 	return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!afbdev)
+		return;
+
+	fb_helper = &afbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8c735f54..5d11e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -485,7 +485,7 @@
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -493,6 +493,9 @@
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_fbdev_restore_mode(adev);
 	vga_switcheroo_process_delayed_switch();
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 64efe5b..7bd470d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -567,6 +567,7 @@
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1e14531..53d551f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -455,8 +455,10 @@
 		return -ENOMEM;
 
 	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
-	if (r)
+	if (r) {
+		kfree(ib);
 		return r;
+	}
 	ib->length_dw = 0;
 
 	/* walk over the address space and update the page directory */
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc4..1e0bba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@
 			amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
 		}
 		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
-							       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
 		if (ext_encoder)
 			amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 82e8d07..a1a35a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6185,6 +6185,11 @@
 	if (!amdgpu_dpm)
 		return 0;
 
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
+
 	ret = ci_set_temperature_range(adev);
 	if (ret)
 		return ret;
@@ -6232,9 +6237,6 @@
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4b6ce74..484710c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1567,6 +1567,9 @@
 	int ret, i;
 	u16 tmp16;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 44fa96a..2e3373e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -596,6 +596,12 @@
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (amdgpu_dpm) {
+		int ret;
+		/* init the sysfs and debugfs files late */
+		ret = amdgpu_pm_sysfs_init(adev);
+		if (ret)
+			return ret;
+
 		/* powerdown unused blocks for now */
 		cz_dpm_powergate_uvd(adev, true);
 		cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
 
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_init_failed;
-
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e4d101b..d4c82b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -255,6 +255,24 @@
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v10_0_page_flip - pageflip callback.
  *
@@ -2663,9 +2681,10 @@
 		dce_v10_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v10_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v10_0_crtc_load_lut(crtc);
 		break;
@@ -3025,6 +3044,8 @@
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -3039,6 +3060,8 @@
 		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -3050,6 +3073,8 @@
 
 	dce_v10_0_hpd_fini(adev);
 
+	dce_v10_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -3075,6 +3100,8 @@
 	/* initialize hpd */
 	dce_v10_0_hpd_init(adev);
 
+	dce_v10_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -3369,7 +3396,6 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 6411e82..7e1cf5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -233,6 +233,24 @@
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v11_0_page_flip - pageflip callback.
  *
@@ -2640,9 +2658,10 @@
 		dce_v11_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v11_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v11_0_crtc_load_lut(crtc);
 		break;
@@ -2888,7 +2907,7 @@
 
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
-		adev->mode_info.num_crtc = 4;
+		adev->mode_info.num_crtc = 3;
 		adev->mode_info.num_hpd = 6;
 		adev->mode_info.num_dig = 9;
 		break;
@@ -3000,6 +3019,8 @@
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -3014,6 +3035,8 @@
 		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -3025,6 +3048,8 @@
 
 	dce_v11_0_hpd_fini(adev);
 
+	dce_v11_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -3051,6 +3076,8 @@
 	/* initialize hpd */
 	dce_v11_0_hpd_init(adev);
 
+	dce_v11_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -3345,7 +3372,6 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c86911c..34b9c2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -204,6 +204,24 @@
 		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
+static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Enable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
+}
+
+static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	/* Disable pflip interrupts */
+	for (i = 0; i < adev->mode_info.num_crtc; i++)
+		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
+}
+
 /**
  * dce_v8_0_page_flip - pageflip callback.
  *
@@ -2575,9 +2593,10 @@
 		dce_v8_0_vga_enable(crtc, true);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v8_0_vga_enable(crtc, false);
-		/* Make sure VBLANK interrupt is still enabled */
+		/* Make sure VBLANK and PFLIP interrupts are still enabled */
 		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
+		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
 		dce_v8_0_crtc_load_lut(crtc);
 		break;
@@ -2933,6 +2952,8 @@
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -2947,6 +2968,8 @@
 		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
 	}
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -2958,6 +2981,8 @@
 
 	dce_v8_0_hpd_fini(adev);
 
+	dce_v8_0_pageflip_interrupt_fini(adev);
+
 	return 0;
 }
 
@@ -2981,6 +3006,8 @@
 	/* initialize hpd */
 	dce_v8_0_hpd_init(adev);
 
+	dce_v8_0_pageflip_interrupt_init(adev);
+
 	return 0;
 }
 
@@ -3376,7 +3403,6 @@
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
 	drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
-	amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
 	queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528a..fab5471 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@
 	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
 	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
 	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+	/* reset addr and status */
+	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+	if (!addr && !status)
+		return 0;
+
 	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
 		entry->src_id, entry->src_data);
 	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@
 	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 		status);
 	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
-	/* reset addr and status */
-	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742..7bc9e9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@
 	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
 	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
 	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+	/* reset addr and status */
+	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+	if (!addr && !status)
+		return 0;
+
 	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
 		entry->src_id, entry->src_data);
 	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@
 	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 		status);
 	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
-	/* reset addr and status */
-	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
 	return 0;
 }
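
Both gmc hunks apply the same reordering: acknowledge the fault by writing the reset bit in VM_CONTEXT1_CNTL2 first, then treat an all-zero address/status pair as an already-handled or spurious interrupt instead of logging it. The resulting control flow, reduced to a sketch:

#include <stdio.h>

static int handle_fault(volatile unsigned int *ack_reg,
			unsigned int addr, unsigned int status)
{
	*ack_reg |= 1;		/* reset the addr/status latches first */

	if (!addr && !status)	/* nothing latched: spurious, stay quiet */
		return 0;

	fprintf(stderr, "GPU fault: addr 0x%08x status 0x%08x\n",
		addr, status);
	return 0;
}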
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 94ec04a..9745ed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2995,6 +2995,12 @@
 {
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b55ceb1..0bac8702 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1005,6 +1005,9 @@
 	u32 mask;
 	int ret;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f..3b47ae3 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
 #include "cgs_common.h"
 
 /**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device:  opaque device handle
- * @dmabuf_fd:   DMABuf file descriptor
- * @handle:      memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return:  0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
-				    cgs_handle_t *handle);
-
-/**
  * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
  * @private_data:  private data provided to cgs_add_irq_source
  * @src_id:        interrupt source ID
@@ -114,16 +101,12 @@
 typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
-	cgs_import_gpu_mem_t import_gpu_mem;
-
 	/* IRQ handling */
 	cgs_add_irq_source_t add_irq_source;
 	cgs_irq_get_t irq_get;
 	cgs_irq_put_t irq_put;
 };
 
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)		\
-	CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
 #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
 	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,	\
 		    private_data)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5f..5bca390 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@
 				  struct drm_dp_mst_port *port,
 				  int offset, int size, u8 *bytes);
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-				    struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+				     struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 					   struct drm_dp_mst_branch *mstb,
 					   struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@
 	struct drm_dp_mst_port *port, *tmp;
 	bool wake_tx = false;
 
-	cancel_work_sync(&mstb->mgr->work);
-
 	/*
 	 * destroy all ports - don't need lock
 	 * as there are no more references to the mst branch
@@ -863,29 +861,33 @@
 {
 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
 	if (!port->input) {
 		port->vcpi.num_slots = 0;
 
 		kfree(port->cached_edid);
 
-		/* we can't destroy the connector here, as
-		   we might be holding the mode_config.mutex
-		   from an EDID retrieval */
+		/*
+		 * The only time we don't have a connector
+		 * on an output port is if the connector init
+		 * fails.
+		 */
 		if (port->connector) {
+			/* we can't destroy the connector here, as
+			 * we might be holding the mode_config.mutex
+			 * from an EDID retrieval */
+
 			mutex_lock(&mgr->destroy_connector_lock);
 			list_add(&port->next, &mgr->destroy_connector_list);
 			mutex_unlock(&mgr->destroy_connector_lock);
 			schedule_work(&mgr->destroy_connector_work);
 			return;
 		}
+		/* no need to clean up the vcpi:
+		 * if we have no connector we never set up a vcpi */
 		drm_dp_port_teardown_pdt(port, port->pdt);
-
-		if (!port->input && port->vcpi.vcpi > 0)
-			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
 	}
 	kfree(port);
-
-	(*mgr->cbs->hotplug)(mgr);
 }
 
 static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@
 	}
 }
 
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
-				struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+				int pnum,
 				char *proppath,
 				size_t proppath_size)
 {
@@ -1041,7 +1043,7 @@
 		snprintf(temp, sizeof(temp), "-%d", port_num);
 		strlcat(proppath, temp, proppath_size);
 	}
-	snprintf(temp, sizeof(temp), "-%d", port->port_num);
+	snprintf(temp, sizeof(temp), "-%d", pnum);
 	strlcat(proppath, temp, proppath_size);
 }
 
@@ -1105,22 +1107,32 @@
 		drm_dp_port_teardown_pdt(port, old_pdt);
 
 		ret = drm_dp_port_setup_pdt(port);
-		if (ret == true) {
+		if (ret == true)
 			drm_dp_send_link_address(mstb->mgr, port->mstb);
-			port->mstb->link_address_sent = true;
-		}
 	}
 
 	if (created && !port->input) {
 		char proppath[255];
-		build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
-		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
-		if (port->port_num >= 8) {
-			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+		if (!port->connector) {
+			/* remove it from the port list */
+			mutex_lock(&mstb->mgr->lock);
+			list_del(&port->next);
+			mutex_unlock(&mstb->mgr->lock);
+			/* drop port list reference */
+			drm_dp_put_port(port);
+			goto out;
 		}
+		if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+			drm_mode_connector_set_tile_property(port->connector);
+		}
+		(*mstb->mgr->cbs->register_connector)(port->connector);
 	}
 
+out:
 	/* put reference to this port */
 	drm_dp_put_port(port);
 }
@@ -1202,10 +1214,9 @@
 {
 	struct drm_dp_mst_port *port;
 	struct drm_dp_mst_branch *mstb_child;
-	if (!mstb->link_address_sent) {
+	if (!mstb->link_address_sent)
 		drm_dp_send_link_address(mgr, mstb);
-		mstb->link_address_sent = true;
-	}
+
 	list_for_each_entry(port, &mstb->ports, next) {
 		if (port->input)
 			continue;
@@ -1458,8 +1469,8 @@
 	mutex_unlock(&mgr->qlock);
 }
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-				    struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+				     struct drm_dp_mst_branch *mstb)
 {
 	int len;
 	struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 	if (!txmsg)
-		return -ENOMEM;
+		return;
 
 	txmsg->dst = mstb;
 	len = build_link_address(txmsg);
 
+	mstb->link_address_sent = true;
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@
 			}
 			(*mgr->cbs->hotplug)(mgr);
 		}
-	} else
+	} else {
+		mstb->link_address_sent = false;
 		DRM_DEBUG_KMS("link address failed %d\n", ret);
+	}
 
 	kfree(txmsg);
-	return 0;
 }
 
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@
 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
 	mutex_unlock(&mgr->lock);
+	flush_work(&mgr->work);
+	flush_work(&mgr->destroy_connector_work);
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
@@ -2263,10 +2278,10 @@
 
 	if (port->cached_edid)
 		edid = drm_edid_duplicate(port->cached_edid);
-	else
+	else {
 		edid = drm_get_edid(connector, &port->aux.ddc);
-
-	drm_mode_connector_set_tile_property(connector);
+		drm_mode_connector_set_tile_property(connector);
+	}
 	drm_dp_put_port(port);
 	return edid;
 }
@@ -2671,7 +2686,7 @@
 {
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
 	struct drm_dp_mst_port *port;
-
+	bool send_hotplug = false;
 	/*
 	 * Not a regular list traverse as we have to drop the destroy
 	 * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@
 		if (!port->input && port->vcpi.vcpi > 0)
 			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
 		kfree(port);
+		send_hotplug = true;
 	}
+	if (send_hotplug)
+		(*mgr->cbs->hotplug)(mgr);
 }
 
 /**
@@ -2747,6 +2765,7 @@
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+	flush_work(&mgr->work);
 	flush_work(&mgr->destroy_connector_work);
 	mutex_lock(&mgr->payload_lock);
 	kfree(mgr->payloads);
@@ -2782,12 +2801,13 @@
 	if (msgs[num - 1].flags & I2C_M_RD)
 		reading = true;
 
-	if (!reading) {
+	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
 		ret = -EIO;
 		goto out;
 	}
 
+	memset(&msg, 0, sizeof(msg));
 	msg.req_type = DP_REMOTE_I2C_READ;
 	msg.u.i2c_read.num_transactions = num - 1;
 	msg.u.i2c_read.port_number = port->port_num;
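
Two independent hardening steps here: a remote-i2c-read sideband message can only describe a bounded number of write transactions before the final read, so the count is validated up front, and msg lives on the stack, so it is zeroed before the selective field assignments to keep uninitialized bytes out of the encoded packet. The shape of both checks, with the limit value assumed:

#include <string.h>

#define MAX_TRANSACTIONS 4	/* assumed sideband limit */

struct i2c_read_msg {
	unsigned char req_type;
	unsigned char num_transactions;
	unsigned char port_number;
	/* ...remaining fields encoded into the sideband packet... */
};

static int build_remote_read(struct i2c_read_msg *msg, int num,
			     int port, int last_is_read)
{
	/* reject layouts the sideband message cannot express */
	if (!last_is_read || num - 1 > MAX_TRANSACTIONS)
		return -1;

	/* zero everything so unset fields carry no stack garbage */
	memset(msg, 0, sizeof(*msg));
	msg->req_type = 0x22;	/* DP_REMOTE_I2C_READ */
	msg->num_transactions = num - 1;
	msg->port_number = port;
	return 0;
}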
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299..ca08c47 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@
 		struct drm_crtc *crtc = mode_set->crtc;
 		int ret;
 
-		if (crtc->funcs->cursor_set) {
+		if (crtc->funcs->cursor_set2) {
+			ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+			if (ret)
+				error = true;
+		} else if (crtc->funcs->cursor_set) {
 			ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
 			if (ret)
 				error = true;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780..a18164f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
@@ -113,6 +124,8 @@
 	if (poll)
 		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
 
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 							      uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		__drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -428,7 +441,7 @@
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
 	mutex_lock(&dev->mode_config.mutex);
-	__drm_kms_helper_poll_enable(dev);
+	drm_kms_helper_poll_enable_locked(dev);
 	mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0f6cd33..684bd4a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -235,18 +235,12 @@
 			   char *buf)
 {
 	struct drm_connector *connector = to_drm_connector(device);
-	struct drm_device *dev = connector->dev;
-	uint64_t dpms_status;
-	int ret;
+	int dpms;
 
-	ret = drm_object_property_get_value(&connector->base,
-					    dev->mode_config.dpms_property,
-					    &dpms_status);
-	if (ret)
-		return 0;
+	dpms = READ_ONCE(connector->dpms);
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
-			drm_get_dpms_name((int)dpms_status));
+			drm_get_dpms_name(dpms));
 }
 
 static ssize_t enabled_show(struct device *device,
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78e..e6cbaca 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
  * DECON stands for Display and Enhancement controller.
  */
 
-#define DECON_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 #define WINDOWS_NR	2
@@ -165,16 +164,6 @@
 	return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	if (adjusted_mode->vrefresh == 0)
-		adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
-	return true;
-}
-
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
 	.enable = decon_enable,
 	.disable = decon_disable,
-	.mode_fixup = decon_mode_fixup,
 	.commit = decon_commit,
 	.enable_vblank = decon_enable_vblank,
 	.disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0..124fb9a 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-	exynos_dp_disable(&dp->encoder);
-	return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
-	struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-	exynos_dp_enable(&dp->encoder);
-	return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
 static const struct of_device_id exynos_dp_match[] = {
 	{ .compatible = "samsung,exynos5-dp" },
 	{},
@@ -1417,7 +1395,6 @@
 	.driver		= {
 		.name	= "exynos-dp",
 		.owner	= THIS_MODULE,
-		.pm	= &exynos_dp_pm_ops,
 		.of_match_table = exynos_dp_match,
 	},
 };
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2..7f55ba6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
 
 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
@@ -39,7 +38,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
 
 int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
@@ -69,7 +67,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
 
 int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
@@ -87,7 +84,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
 
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -111,7 +107,6 @@
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
 
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 {
@@ -122,4 +117,3 @@
 			subdrv->close(dev, subdrv->dev, file);
 	}
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f..ed28823 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@
 		exynos_crtc->ops->disable(exynos_crtc);
 }
 
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-			    const struct drm_display_mode *mode,
-			    struct drm_display_mode *adjusted_mode)
-{
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-	if (exynos_crtc->ops->mode_fixup)
-		return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
-						    adjusted_mode);
-
-	return true;
-}
-
 static void
 exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
@@ -99,7 +85,6 @@
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
 	.enable		= exynos_drm_crtc_enable,
 	.disable	= exynos_drm_crtc_disable,
-	.mode_fixup	= exynos_drm_crtc_mode_fixup,
 	.mode_set_nofb	= exynos_drm_crtc_mode_set_nofb,
 	.atomic_begin	= exynos_crtc_atomic_begin,
 	.atomic_flush	= exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4..ae9e6b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
@@ -340,6 +341,7 @@
 
 	return 0;
 }
+#endif
 
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21d..6c717ba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@
  *
  * @enable: enable the device
  * @disable: disable the device
- * @mode_fixup: fix mode data before applying it
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@
 struct exynos_drm_crtc_ops {
 	void (*enable)(struct exynos_drm_crtc *crtc);
 	void (*disable)(struct exynos_drm_crtc *crtc);
-	bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
-				const struct drm_display_mode *mode,
-				struct drm_display_mode *adjusted_mode);
 	void (*commit)(struct exynos_drm_crtc *crtc);
 	int (*enable_vblank)(struct exynos_drm_crtc *crtc);
 	void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a65235..dd3a5e6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@
 	.set_addr = fimc_dst_set_addr,
 };
 
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
-	DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-	if (enable) {
-		clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
-		clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
-		ctx->suspended = false;
-	} else {
-		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
-		clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
-		ctx->suspended = true;
-	}
-
-	return 0;
-}
-
 static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 {
 	struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+	if (enable) {
+		clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+		clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+		ctx->suspended = false;
+	} else {
+		clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+		clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int fimc_suspend(struct device *dev)
 {
@@ -1806,7 +1807,6 @@
 }
 #endif
 
-#ifdef CONFIG_PM
 static int fimc_runtime_suspend(struct device *dev)
 {
 	struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6..3d1aba6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
  * CPU Interface.
  */
 
-#define FIMD_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 /* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@
 	return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
-		const struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode)
-{
-	if (adjusted_mode->vrefresh == 0)
-		adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
-	return true;
-}
-
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
 	struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@
 		return;
 
 	val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-	writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+	writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
 	.enable = fimd_enable,
 	.disable = fimd_disable,
-	.mode_fixup = fimd_mode_fixup,
 	.commit = fimd_commit,
 	.enable_vblank = fimd_enable_vblank,
 	.disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34..c17efdb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
 
 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 				 struct drm_file *file)
@@ -1230,7 +1229,6 @@
 	g2d_put_cmdlist(g2d, node);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
 
 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 			  struct drm_file *file)
@@ -1293,7 +1291,6 @@
 out:
 	return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc3..407afed 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@
 	nr_pages = obj->size >> PAGE_SHIFT;
 
 	if (!is_drm_iommu_supported(dev)) {
-		dma_addr_t start_addr;
-		unsigned int i = 0;
-
 		obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
 		if (!obj->pages) {
 			DRM_ERROR("failed to allocate pages.\n");
 			return -ENOMEM;
 		}
+	}
 
-		obj->cookie = dma_alloc_attrs(dev->dev,
-					obj->size,
-					&obj->dma_addr, GFP_KERNEL,
-					&obj->dma_attrs);
-		if (!obj->cookie) {
-			DRM_ERROR("failed to allocate buffer.\n");
+	obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+				      GFP_KERNEL, &obj->dma_attrs);
+	if (!obj->cookie) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		if (obj->pages)
 			drm_free_large(obj->pages);
-			return -ENOMEM;
-		}
+		return -ENOMEM;
+	}
+
+	if (obj->pages) {
+		dma_addr_t start_addr;
+		unsigned int i = 0;
 
 		start_addr = obj->dma_addr;
 		while (i < nr_pages) {
-			obj->pages[i] = phys_to_page(start_addr);
+			obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+							       start_addr));
 			start_addr += PAGE_SIZE;
 			i++;
 		}
 	} else {
-		obj->pages = dma_alloc_attrs(dev->dev, obj->size,
-					&obj->dma_addr, GFP_KERNEL,
-					&obj->dma_attrs);
-		if (!obj->pages) {
-			DRM_ERROR("failed to allocate buffer.\n");
-			return -ENOMEM;
-		}
+		obj->pages = obj->cookie;
 	}
 
 	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@
 	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
 			(unsigned long)obj->dma_addr, obj->size);
 
-	if (!is_drm_iommu_supported(dev)) {
-		dma_free_attrs(dev->dev, obj->size, obj->cookie,
-				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
-		drm_free_large(obj->pages);
-	} else
-		dma_free_attrs(dev->dev, obj->size, obj->pages,
-				(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+	dma_free_attrs(dev->dev, obj->size, obj->cookie,
+			(dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-	obj->dma_addr = (dma_addr_t)NULL;
+	if (!is_drm_iommu_supported(dev))
+		drm_free_large(obj->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@
 	 * once dmabuf's refcount becomes 0.
 	 */
 	if (obj->import_attach)
-		goto out;
-
-	exynos_drm_free_buf(exynos_gem_obj);
-
-out:
-	drm_gem_free_mmap_offset(obj);
+		drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+	else
+		exynos_drm_free_buf(exynos_gem_obj);
 
 	/* release file pointer to gem object. */
 	drm_gem_object_release(obj);
 
 	kfree(exynos_gem_obj);
-	exynos_gem_obj = NULL;
 }
 
 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@
 	return exynos_gem_obj->size;
 }
 
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
 						      unsigned long size)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@
 		return ERR_PTR(ret);
 	}
 
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret < 0) {
+		drm_gem_object_release(obj);
+		kfree(exynos_gem_obj);
+		return ERR_PTR(ret);
+	}
+
 	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
 	return exynos_gem_obj;
@@ -313,7 +307,7 @@
 	drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 				      struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@
 
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv)
-{	struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_exynos_gem_info *args = data;
 	struct drm_gem_object *obj;
 
@@ -402,6 +397,7 @@
 			       struct drm_mode_create_dumb *args)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
+	unsigned int flags;
 	int ret;
 
 	/*
@@ -413,16 +409,12 @@
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	if (is_drm_iommu_supported(dev)) {
-		exynos_gem_obj = exynos_drm_gem_create(dev,
-			EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
-			args->size);
-	} else {
-		exynos_gem_obj = exynos_drm_gem_create(dev,
-			EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
-			args->size);
-	}
+	if (is_drm_iommu_supported(dev))
+		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+	else
+		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
+	exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
 	if (IS_ERR(exynos_gem_obj)) {
 		dev_warn(dev->dev, "FB allocation failed.\n");
 		return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@
 		goto unlock;
 	}
 
-	ret = drm_gem_create_mmap_offset(obj);
-	if (ret)
-		goto out;
-
 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
-out:
 	drm_gem_object_unreference(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@
 
 err_close_vm:
 	drm_gem_vm_close(vma);
-	drm_gem_free_mmap_offset(obj);
 
 	return ret;
 }
@@ -588,6 +574,8 @@
 	if (ret < 0)
 		goto err_free_large;
 
+	exynos_gem_obj->sgt = sgt;
+
 	if (sgt->nents == 1) {
 		/* always physically continuous memory if sgt->nents is 1. */
 		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
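
One thread running through the exynos_drm_gem.c hunks above: the mmap offset is now created once in exynos_drm_gem_init() instead of lazily in the dumb-map-offset path, which is why drm_gem_create_mmap_offset() and drm_gem_free_mmap_offset() disappear from the later functions. A minimal sketch of that shape (hypothetical my_* names, not the driver's exact code):

#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>

/* Create the object and its mmap offset together; failure unwinds in
 * one place instead of in every ioctl that might need the offset. */
static int my_gem_init(struct drm_device *dev, struct drm_gem_object *obj,
		       unsigned long size)
{
	int ret;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0)
		return ret;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		return ret;
	}
	return 0;
}

/* The dumb-map-offset path then has nothing left to allocate: */
static u64 my_gem_mmap_offset(struct drm_gem_object *obj)
{
	return drm_vma_node_offset_addr(&obj->vma_node);
}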
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f84..b62d100 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
  * @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *	user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@
 	dma_addr_t		dma_addr;
 	struct dma_attrs	dma_attrs;
 	struct page		**pages;
+	struct sg_table		*sgt;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@
 /* destroy a buffer with gem object */
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
 
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-						      unsigned long size);
-
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
 						unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e706..2f5c118 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
 	if (enable) {
@@ -822,7 +823,6 @@
 }
 #endif
 
-#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
 	struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a..6ade068 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@
 	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
 	drm_mode_connector_set_path_property(connector, pathprop);
+	return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
 	drm_modeset_lock_all(dev);
 	intel_connector_add_to_fbdev(intel_connector);
 	drm_modeset_unlock_all(dev);
 	drm_connector_register(&intel_connector->base);
-	return connector;
 }
 
 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@
 
 static struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = intel_dp_add_mst_connector,
+	.register_connector = intel_dp_register_mst_connector,
 	.destroy_connector = intel_dp_destroy_mst_connector,
 	.hotplug = intel_dp_mst_hotplug,
 };
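
The intel_dp_mst.c change is the driver half of a new core interface: the drm_dp_mst_topology_cbs gain a .register_connector hook, splitting connector allocation (.add_connector) from the point where the connector becomes visible to userspace. The same split shows up in radeon_dp_mst.c further down. A rough sketch of the callback pair, with hypothetical my_* names:

#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_mst_helper.h>

struct my_connector {
	struct drm_connector base;
};

static const struct drm_connector_funcs my_connector_funcs; /* callbacks elided */

/* Phase 1: allocate and initialise, but do not publish yet. */
static struct drm_connector *
my_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
		     struct drm_dp_mst_port *port, const char *pathprop)
{
	struct my_connector *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;
	if (drm_connector_init(mgr->dev, &c->base, &my_connector_funcs,
			       DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(c);
		return NULL;
	}
	drm_mode_connector_set_path_property(&c->base, pathprop);
	return &c->base;
}

/* Phase 2: only now expose it (fbdev hookup, sysfs node). */
static void my_register_mst_connector(struct drm_connector *connector)
{
	drm_connector_register(connector);
}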
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173..b177857 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@
 
 	/* Enable polling and queue hotplug re-enabling. */
 	if (hpd_disabled) {
-		drm_kms_helper_poll_enable(dev);
+		drm_kms_helper_poll_enable_locked(dev);
 		mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
 				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd..7412cae 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@
 	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
 	read_pointer = ring->next_context_status_buffer;
-	write_pointer = status_pointer & 0x07;
+	write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
 	if (read_pointer > write_pointer)
-		write_pointer += 6;
+		write_pointer += GEN8_CSB_ENTRIES;
 
 	spin_lock(&ring->execlist_lock);
 
 	while (read_pointer < write_pointer) {
 		read_pointer++;
 		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8);
 		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-				(read_pointer % 6) * 8 + 4);
+				(read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
 		if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
 			continue;
@@ -521,10 +521,12 @@
 	spin_unlock(&ring->execlist_lock);
 
 	WARN(submit_contexts > 2, "More than two context complete events?\n");
-	ring->next_context_status_buffer = write_pointer % 6;
+	ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
 	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-		   _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+		   _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+				 ((u32)ring->next_context_status_buffer &
+				  GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 next_context_status_buffer_hw;
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
 	POSTING_READ(RING_MODE_GEN7(ring));
-	ring->next_context_status_buffer = 0;
+
+	/*
+	 * Instead of resetting the Context Status Buffer (CSB) read pointer to
+	 * zero, we need to read the write pointer from hardware and use its
+	 * value because "this register is power context save restored".
+	 * Effectively, these states have been observed:
+	 *
+	 *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+	 * BDW  | CSB regs not reset       | CSB regs reset       |
+	 * CHT  | CSB regs not reset       | CSB regs not reset   |
+	 */
+	next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+						   & GEN8_CSB_PTR_MASK);
+
+	/*
+	 * When the CSB registers are reset (also after power-up / gpu reset),
+	 * the CSB write pointer is set to all 1's, which is not valid; use
+	 * '5' in this special case so that the first element read is CSB[0].
+	 */
+	if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+		next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+	ring->next_context_status_buffer = next_context_status_buffer_hw;
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
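
The comment block added above carries the whole fix: the CSB write pointer survives some suspend flavours but reads back as all 1's after a real reset, so the driver resumes from the hardware value and treats the all-ones case as "last entry". A standalone C illustration of the resulting pointer arithmetic, using the values of the GEN8_CSB_* defines added to intel_lrc.h below:

#include <stdio.h>

#define CSB_ENTRIES  6		/* GEN8_CSB_ENTRIES */
#define CSB_PTR_MASK 0x07	/* GEN8_CSB_PTR_MASK */

int main(void)
{
	/* Pretend the register reads back all 1's: the CSB was reset. */
	unsigned int status_pointer = 0xffffffff;
	unsigned int write_ptr = status_pointer & CSB_PTR_MASK;

	if (write_ptr == CSB_PTR_MASK)		/* invalid after reset */
		write_ptr = CSB_ENTRIES - 1;	/* i.e. '5' */

	/* The IRQ handler consumes entries (read_ptr + 1 .. write_ptr)
	 * modulo CSB_ENTRIES, so starting at 5 makes CSB[0] the first
	 * element read, exactly as the comment above promises. */
	unsigned int read_ptr = write_ptr;
	printf("first CSB entry consumed: %u\n",
	       (read_ptr + 1) % CSB_ENTRIES);
	return 0;
}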
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f99..3c63bb3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)			((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3..7401cf9 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@
 	}
 
 	if (power_well->data == SKL_DISP_PW_1) {
-		intel_prepare_ddi(dev);
+		if (!dev_priv->power_domains.initializing)
+			intel_prepare_ddi(dev);
 		gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cc6c228..e905c00 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -469,9 +469,13 @@
 	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		dev->mode_config.max_width = 4096;
 		dev->mode_config.max_height = 4096;
-	} else {
+	} else
+	if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
 		dev->mode_config.max_width = 8192;
 		dev->mode_config.max_height = 8192;
+	} else {
+		dev->mode_config.max_width = 16384;
+		dev->mode_config.max_height = 16384;
 	}
 
 	dev->mode_config.preferred_depth = 24;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2791701..59f27e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -178,8 +178,30 @@
 	return 0;
 }
 
+static int
+nouveau_fbcon_open(struct fb_info *info, int user)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	int ret = pm_runtime_get_sync(drm->dev->dev);
+	if (ret < 0 && ret != -EACCES)
+		return ret;
+	return 0;
+}
+
+static int
+nouveau_fbcon_release(struct fb_info *info, int user)
+{
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	pm_runtime_put(drm->dev->dev);
+	return 0;
+}
+
 static struct fb_ops nouveau_fbcon_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = nouveau_fbcon_open,
+	.fb_release = nouveau_fbcon_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = nouveau_fbcon_fillrect,
@@ -195,6 +217,8 @@
 
 static struct fb_ops nouveau_fbcon_sw_ops = {
 	.owner = THIS_MODULE,
+	.fb_open = nouveau_fbcon_open,
+	.fb_release = nouveau_fbcon_release,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
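
Both nouveau fb_ops tables gain the same open/release pair: every fbdev user now holds a runtime-PM reference, so the GPU cannot runtime-suspend underneath an active console. A minimal sketch of the pairing (my_dev() is a hypothetical accessor for the underlying struct device):

#include <linux/fb.h>
#include <linux/pm_runtime.h>

static struct device *my_dev(struct fb_info *info); /* hypothetical */

static int my_fb_open(struct fb_info *info, int user)
{
	/* -EACCES means runtime PM is disabled for this device; the
	 * console must still work then, so that error is tolerated. */
	int ret = pm_runtime_get_sync(my_dev(info));

	if (ret < 0 && ret != -EACCES)
		return ret;
	return 0;
}

static int my_fb_release(struct fb_info *info, int user)
{
	pm_runtime_put(my_dev(info));
	return 0;
}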
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index 65af314..a7d69ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -267,6 +267,12 @@
 		index = NVKM_I2C_BUS_PRI;
 		if (init->outp && init->outp->i2c_upper_default)
 			index = NVKM_I2C_BUS_SEC;
+	} else
+	if (index == 0x80) {
+		index = NVKM_I2C_BUS_PRI;
+	} else
+	if (index == 0x81) {
+		index = NVKM_I2C_BUS_SEC;
 	}
 
 	bus = nvkm_i2c_bus_find(i2c, index);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index e0ec2a6..212800e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -8,7 +8,10 @@
 	void *(*init)(struct nvkm_bios *, const char *);
 	void  (*fini)(void *);
 	u32   (*read)(void *, u32 offset, u32 length, struct nvkm_bios *);
+	u32   (*size)(void *);
 	bool rw;
+	bool ignore_checksum;
+	bool no_pcir;
 };
 
 int nvbios_extend(struct nvkm_bios *, u32 length);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 792f017..b2557e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -45,7 +45,7 @@
 		u32 read = mthd->func->read(data, start, limit - start, bios);
 		bios->size = start + read;
 	}
-	return bios->size >= limit;
+	return bios->size >= upto;
 }
 
 static int
@@ -55,14 +55,22 @@
 	struct nvbios_image image;
 	int score = 1;
 
-	if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
-		nvkm_debug(subdev, "%08x: header fetch failed\n", offset);
-		return 0;
-	}
+	if (mthd->func->no_pcir) {
+		image.base = 0;
+		image.type = 0;
+		image.size = mthd->func->size(mthd->data);
+		image.last = 1;
+	} else {
+		if (!shadow_fetch(bios, mthd, offset + 0x1000)) {
+			nvkm_debug(subdev, "%08x: header fetch failed\n",
+				   offset);
+			return 0;
+		}
 
-	if (!nvbios_image(bios, idx, &image)) {
-		nvkm_debug(subdev, "image %d invalid\n", idx);
-		return 0;
+		if (!nvbios_image(bios, idx, &image)) {
+			nvkm_debug(subdev, "image %d invalid\n", idx);
+			return 0;
+		}
 	}
 	nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
 		   image.base, image.type, image.size);
@@ -74,7 +82,8 @@
 
 	switch (image.type) {
 	case 0x00:
-		if (nvbios_checksum(&bios->data[image.base], image.size)) {
+		if (!mthd->func->ignore_checksum &&
+		    nvbios_checksum(&bios->data[image.base], image.size)) {
 			nvkm_debug(subdev, "%08x: checksum failed\n",
 				   image.base);
 			if (mthd->func->rw)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index bd60d7d..4bf486b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -21,6 +21,7 @@
  *
  */
 #include "priv.h"
+
 #include <core/pci.h>
 
 #if defined(__powerpc__)
@@ -33,17 +34,26 @@
 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
 {
 	struct priv *priv = data;
-	if (offset + length <= priv->size) {
+	if (offset < priv->size) {
+		length = min_t(u32, length, priv->size - offset);
 		memcpy_fromio(bios->data + offset, priv->data + offset, length);
 		return length;
 	}
 	return 0;
 }
 
+static u32
+of_size(void *data)
+{
+	struct priv *priv = data;
+	return priv->size;
+}
+
 static void *
 of_init(struct nvkm_bios *bios, const char *name)
 {
-	struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev;
+	struct nvkm_device *device = bios->subdev.device;
+	struct pci_dev *pdev = device->func->pci(device)->pdev;
 	struct device_node *dn;
 	struct priv *priv;
 	if (!(dn = pci_device_to_OF_node(pdev)))
@@ -62,7 +72,10 @@
 	.init = of_init,
 	.fini = (void(*)(void *))kfree,
 	.read = of_read,
+	.size = of_size,
 	.rw = false,
+	.ignore_checksum = true,
+	.no_pcir = true,
 };
 #else
 const struct nvbios_source
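
The of_read() change above replaces an all-or-nothing bounds check with a clamp: a read that starts inside the shadowed image but runs past its end now returns the valid tail instead of nothing, which the new size()/no_pcir path in shadow.c relies on. A standalone userspace illustration of the same clamping:

#include <stdio.h>
#include <string.h>

static unsigned int clamped_read(char *dst, const char *src,
				 unsigned int src_size,
				 unsigned int offset, unsigned int length)
{
	if (offset >= src_size)
		return 0;
	if (length > src_size - offset)		/* min_t(u32, ...) above */
		length = src_size - offset;
	memcpy(dst, src + offset, length);
	return length;
}

int main(void)
{
	char image[8] = "VBIOSROM", buf[16];

	/* Ask for 8 bytes starting at offset 6: only 2 remain. */
	printf("read %u bytes\n",
	       clamped_read(buf, image, sizeof(image), 6, 8));
	return 0;
}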
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
index 814cb51..385a90f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c
@@ -35,6 +35,8 @@
 nvkm_device_agp_quirks[] = {
 	/* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */
 	{ PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 },
+	/* SiS 761 does not support AGP cards, use PCI mode */
+	{ PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 },
 	{},
 };
 
@@ -137,8 +139,10 @@
 	while (quirk->hostbridge_vendor) {
 		if (info.device->vendor == quirk->hostbridge_vendor &&
 		    info.device->device == quirk->hostbridge_device &&
-		    pci->pdev->vendor == quirk->chip_vendor &&
-		    pci->pdev->device == quirk->chip_device) {
+		    (quirk->chip_vendor == (u16)PCI_ANY_ID ||
+		    pci->pdev->vendor == quirk->chip_vendor) &&
+		    (quirk->chip_device == (u16)PCI_ANY_ID ||
+		    pci->pdev->device == quirk->chip_device)) {
 			nvkm_info(subdev, "forcing default agp mode to %dX, "
 					  "use NvAGP=<mode> to override\n",
 				  quirk->mode);
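
The agp.c hunks add wildcard matching to the quirk table: a chip vendor or device of PCI_ANY_ID now matches anything, which is what lets the single SiS 761 entry force PCI mode for any NVIDIA chip behind that bridge. The (u16) casts are needed because PCI_ANY_ID is ~0 while the table fields are 16 bits wide. A compileable userspace sketch of the same matching rule:

#include <stdio.h>

#define ANY_ID 0xffffu	/* stands in for (u16)PCI_ANY_ID */

struct quirk {
	unsigned short host_vendor, host_device;
	unsigned short chip_vendor, chip_device;
	int mode;
};

static const struct quirk quirks[] = {
	{ 0x1039, 0x0761, ANY_ID, ANY_ID, 0 },	/* SiS 761: force PCI */
	{ 0 },
};

static int quirk_mode(unsigned short hv, unsigned short hd,
		      unsigned short cv, unsigned short cd)
{
	const struct quirk *q;

	for (q = quirks; q->host_vendor; q++)
		if (q->host_vendor == hv && q->host_device == hd &&
		    (q->chip_vendor == ANY_ID || q->chip_vendor == cv) &&
		    (q->chip_device == ANY_ID || q->chip_device == cd))
			return q->mode;
	return -1;	/* no quirk */
}

int main(void)
{
	/* Any NVIDIA chip (0x10de) behind the SiS 761 host bridge hits
	 * the wildcard entry. */
	printf("forced AGP mode: %d\n",
	       quirk_mode(0x1039, 0x0761, 0x10de, 0x0140));
	return 0;
}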
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index dd845f8..183aea1 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -244,6 +244,10 @@
 	ret = qxl_bo_reserve(bo, false);
 	if (ret)
 		return ret;
+	ret = qxl_bo_pin(bo, bo->type, NULL);
+	qxl_bo_unreserve(bo);
+	if (ret)
+		return ret;
 
 	qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
 			  &norect, one_clip_rect, inc);
@@ -257,7 +261,11 @@
 	}
 	drm_vblank_put(dev, qcrtc->index);
 
-	qxl_bo_unreserve(bo);
+	ret = qxl_bo_reserve(bo, false);
+	if (!ret) {
+		qxl_bo_unpin(bo);
+		qxl_bo_unreserve(bo);
+	}
 
 	return 0;
 }
@@ -618,7 +626,7 @@
 		  adjusted_mode->hdisplay,
 		  adjusted_mode->vdisplay);
 
-	if (qcrtc->index == 0)
+	if (bo->is_primary == false)
 		recreate_primary = true;
 
 	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 41c422f..c4a5526 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -144,14 +144,17 @@
 
 	spin_lock_irqsave(&qfbdev->dirty.lock, flags);
 
-	if (qfbdev->dirty.y1 < y)
-		y = qfbdev->dirty.y1;
-	if (qfbdev->dirty.y2 > y2)
-		y2 = qfbdev->dirty.y2;
-	if (qfbdev->dirty.x1 < x)
-		x = qfbdev->dirty.x1;
-	if (qfbdev->dirty.x2 > x2)
-		x2 = qfbdev->dirty.x2;
+	if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
+	    (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
+		if (qfbdev->dirty.y1 < y)
+			y = qfbdev->dirty.y1;
+		if (qfbdev->dirty.y2 > y2)
+			y2 = qfbdev->dirty.y2;
+		if (qfbdev->dirty.x1 < x)
+			x = qfbdev->dirty.x1;
+		if (qfbdev->dirty.x2 > x2)
+			x2 = qfbdev->dirty.x2;
+	}
 
 	qfbdev->dirty.x1 = x;
 	qfbdev->dirty.x2 = x2;
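
The qxl_fb.c fix guards the dirty-rectangle union: an empty pending rectangle used to be unioned with fresh damage anyway, dragging the flush region out to the screen origin. A standalone illustration:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Merge new damage into the pending rectangle, but only union with the
 * pending one when it actually covers something. */
static void merge_damage(struct rect *pending, struct rect add)
{
	if ((pending->x2 - pending->x1) && (pending->y2 - pending->y1)) {
		if (pending->x1 < add.x1) add.x1 = pending->x1;
		if (pending->y1 < add.y1) add.y1 = pending->y1;
		if (pending->x2 > add.x2) add.x2 = pending->x2;
		if (pending->y2 > add.y2) add.y2 = pending->y2;
	}
	*pending = add;
}

int main(void)
{
	struct rect pending = { 0, 0, 0, 0 };	/* nothing dirty yet */

	merge_damage(&pending, (struct rect){ 100, 100, 120, 120 });
	/* Prints 100,100-120,120; the old code would have produced
	 * 0,0-120,120. */
	printf("%d,%d-%d,%d\n", pending.x1, pending.y1,
	       pending.x2, pending.y2);
	return 0;
}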
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b66ec33..4efa8e2 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -307,7 +307,7 @@
 		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
 		if (idr_ret < 0)
 			return idr_ret;
-		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
+		bo = to_qxl_bo(entry->tv.bo);
 
 		(*release)->release_offset = create_rel->release_offset + 64;
 
@@ -316,8 +316,6 @@
 		info = qxl_release_map(qdev, *release);
 		info->id = idr_ret;
 		qxl_release_unmap(qdev, *release, info);
-
-		qxl_bo_unref(&bo);
 		return 0;
 	}
 
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c387259..65adb9c 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@
 		} else
 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-			args.ucAction = ATOM_LCD_BLON;
-			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
 		}
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@
 				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
 		}
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			atombios_dig_transmitter_setup(encoder,
-						       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
 		if (ext_encoder)
 			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 		break;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d2e9e9e..6743174 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1633,18 +1633,8 @@
 	radeon_fbdev_init(rdev);
 	drm_kms_helper_poll_init(rdev->ddev);
 
-	if (rdev->pm.dpm_enabled) {
-		/* do dpm late init */
-		ret = radeon_pm_late_init(rdev);
-		if (ret) {
-			rdev->pm.dpm_enabled = false;
-			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
-		}
-		/* set the dpm state for PX since there won't be
-		 * a modeset to call this.
-		 */
-		radeon_pm_compute_clocks(rdev);
-	}
+	/* do pm late init */
+	ret = radeon_pm_late_init(rdev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c06..744f5c4 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@
 {
 	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
 	struct drm_device *dev = master->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_connector *radeon_connector;
 	struct drm_connector *connector;
 
@@ -284,14 +283,22 @@
 	radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
 
 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 	drm_mode_connector_set_path_property(connector, pathprop);
 
+	return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
 	drm_modeset_lock_all(dev);
 	radeon_fb_add_connector(rdev, connector);
 	drm_modeset_unlock_all(dev);
 
 	drm_connector_register(connector);
-	return connector;
 }
 
 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +331,7 @@
 
 struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
+	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
 	.hotplug = radeon_dp_mst_hotplug,
 };
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858..26da2f4 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@
 	struct radeon_device *rdev;
 };
 
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
-	int ret;
-
-	ret = drm_fb_helper_set_par(info);
-
-	/* XXX: with universal plane support fbdev will automatically disable
-	 * all non-primary planes (including the cursor)
-	 */
-	if (ret == 0) {
-		struct drm_fb_helper *fb_helper = info->par;
-		int i;
-
-		for (i = 0; i < fb_helper->crtc_count; i++) {
-			struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
-			radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
-		}
-	}
-
-	return ret;
-}
-
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = radeon_fb_helper_set_par,
+	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = drm_fb_helper_cfb_fillrect,
 	.fb_copyarea = drm_fb_helper_cfb_copyarea,
 	.fb_imageblit = drm_fb_helper_cfb_imageblit,
@@ -427,3 +397,19 @@
 {
 	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
 }
+
+void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+{
+	struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!rfbdev)
+		return;
+
+	fb_helper = &rfbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 4a119c2..0e932bf 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -598,7 +598,7 @@
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * radeon_driver_firstopen_kms - drm callback for last close
+ * radeon_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -606,6 +606,9 @@
  */
 void radeon_driver_lastclose_kms(struct drm_device *dev)
 {
+	struct radeon_device *rdev = dev->dev_private;
+
+	radeon_fbdev_restore_mode(rdev);
 	vga_switcheroo_process_delayed_switch();
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index aecc3e3..457b026 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -980,6 +980,7 @@
 void radeon_fbdev_fini(struct radeon_device *rdev);
 void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
+void radeon_fbdev_restore_mode(struct radeon_device *rdev);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 05751f3..44489cc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1326,14 +1326,6 @@
 	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
 
 	if (rdev->pm.num_power_states > 1) {
-		/* where's the best place to put these? */
-		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-		if (ret)
-			DRM_ERROR("failed to create device file for power profile\n");
-		ret = device_create_file(rdev->dev, &dev_attr_power_method);
-		if (ret)
-			DRM_ERROR("failed to create device file for power method\n");
-
 		if (radeon_debugfs_pm_init(rdev)) {
 			DRM_ERROR("Failed to register debugfs file for PM!\n");
 		}
@@ -1391,20 +1383,6 @@
 		goto dpm_failed;
 	rdev->pm.dpm_enabled = true;
 
-	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
-	if (ret)
-		DRM_ERROR("failed to create device file for dpm state\n");
-	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
-	if (ret)
-		DRM_ERROR("failed to create device file for dpm state\n");
-	/* XXX: these are noops for dpm but are here for backwards compat */
-	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
-	if (ret)
-		DRM_ERROR("failed to create device file for power profile\n");
-	ret = device_create_file(rdev->dev, &dev_attr_power_method);
-	if (ret)
-		DRM_ERROR("failed to create device file for power method\n");
-
 	if (radeon_debugfs_pm_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for dpm!\n");
 	}
@@ -1545,9 +1523,44 @@
 	int ret = 0;
 
 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
-		mutex_lock(&rdev->pm.mutex);
-		ret = radeon_dpm_late_enable(rdev);
-		mutex_unlock(&rdev->pm.mutex);
+		if (rdev->pm.dpm_enabled) {
+			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+			if (ret)
+				DRM_ERROR("failed to create device file for dpm state\n");
+			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+			if (ret)
+				DRM_ERROR("failed to create device file for dpm state\n");
+			/* XXX: these are noops for dpm but are here for backwards compat */
+			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+			if (ret)
+				DRM_ERROR("failed to create device file for power profile\n");
+			ret = device_create_file(rdev->dev, &dev_attr_power_method);
+			if (ret)
+				DRM_ERROR("failed to create device file for power method\n");
+
+			mutex_lock(&rdev->pm.mutex);
+			ret = radeon_dpm_late_enable(rdev);
+			mutex_unlock(&rdev->pm.mutex);
+			if (ret) {
+				rdev->pm.dpm_enabled = false;
+				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+			} else {
+				/* set the dpm state for PX since there won't be
+				 * a modeset to call this.
+				 */
+				radeon_pm_compute_clocks(rdev);
+			}
+		}
+	} else {
+		if (rdev->pm.num_power_states > 1) {
+			/* where's the best place to put these? */
+			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+			if (ret)
+				DRM_ERROR("failed to create device file for power profile\n");
+			ret = device_create_file(rdev->dev, &dev_attr_power_method);
+			if (ret)
+				DRM_ERROR("failed to create device file for power method\n");
+		}
 	}
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e9115d3..e72bf46 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2928,6 +2928,7 @@
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index db8b491..5122639 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -34,8 +34,8 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
 
-	seq_printf(m, "fence %ld %lld\n",
-		   atomic64_read(&vgdev->fence_drv.last_seq),
+	seq_printf(m, "fence %llu %lld\n",
+		   (u64)atomic64_read(&vgdev->fence_drv.last_seq),
 		   vgdev->fence_drv.sync_seq);
 	return 0;
 }
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 1da6326..67097c9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -61,7 +61,7 @@
 {
 	struct virtio_gpu_fence *fence = to_virtio_fence(f);
 
-	snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
+	snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
 }
 
 static const struct fence_ops virtio_fence_ops = {
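
Both virtio-gpu hunks fix the same format-string portability problem: the return type of atomic64_read() differs between architectures (long on 64-bit, long long on most 32-bit builds), so any fixed %ld or %lu conversion warns somewhere. Casting to u64, which the kernel defines as unsigned long long everywhere, and printing with %llu is correct on every architecture. The userspace analogue:

#include <stdio.h>

int main(void)
{
	/* Stand-in for atomic64_read(): the natural C type of the value
	 * varies by platform, so normalise it before formatting. */
	long long last_seq = 42;

	printf("fence %llu\n", (unsigned long long)last_seq);
	return 0;
}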
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f92..8a76821 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@
 					 0, 0,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
+	if (ret) {
+		(void) vmw_cmdbuf_man_process(man);
+		ret = drm_mm_insert_node_generic(&man->mm, info->node,
+						 info->page_size, 0, 0,
+						 DRM_MM_SEARCH_DEFAULT,
+						 DRM_MM_CREATE_DEFAULT);
+	}
+
 	spin_unlock_bh(&man->lock);
 	info->done = !ret;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 64b5040..03f63c7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -657,7 +657,8 @@
 	struct vmw_resource *res = &user_srf->srf.res;
 
 	*p_base = NULL;
-	ttm_base_object_unref(&user_srf->backup_base);
+	if (user_srf->backup_base)
+		ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f9aead..652afd1 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -204,6 +204,8 @@
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&channel->listentry);
 		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+
+		primary_channel = channel;
 	} else {
 		primary_channel = channel->primary_channel;
 		spin_lock_irqsave(&primary_channel->lock, flags);
@@ -211,6 +213,14 @@
 		primary_channel->num_sc--;
 		spin_unlock_irqrestore(&primary_channel->lock, flags);
 	}
+
+	/*
+	 * We need to free the bit so that init_vp_index() can reuse it for
+	 * a sub-channel when drivers like hv_netvsc are reloaded.
+	 */
+	cpumask_clear_cpu(channel->target_cpu,
+			  &primary_channel->alloced_cpus_in_node);
+
 	free_channel(channel);
 }
 
@@ -458,6 +468,13 @@
 			continue;
 		}
 
+		/*
+		 * NOTE: in the case of sub-channel, we clear the sub-channel
+		 * related bit(s) in primary->alloced_cpus_in_node in
+		 * hv_process_channel_removal(), so when we reload drivers
+		 * like hv_netvsc in SMP guest, here we're able to re-allocate
+		 * bit from primary->alloced_cpus_in_node.
+		 */
 		if (!cpumask_test_cpu(cur_cpu,
 				&primary->alloced_cpus_in_node)) {
 			cpumask_set_cpu(cur_cpu,
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0..1fd4685 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@
 	{ .compatible = "stericsson,abx500-temp" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
 #endif
 
 static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d..82de3de 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@
 	{ .compatible = "gpio-fan", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
 #endif /* CONFIG_OF_GPIO */
 
 static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712..3e23003 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@
 	{ .compatible = "pwm-fan", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
 	.probe		= pwm_fan_probe,
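
The three hwmon hunks (abx500, gpio-fan, pwm-fan) are the same one-line fix: an of_device_id table that is not exported with MODULE_DEVICE_TABLE() produces no module aliases, so udev cannot autoload the module when a matching device-tree node is probed. A sketch of the complete pattern, with a hypothetical "acme,my-fan" binding:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id my_fan_match[] = {
	{ .compatible = "acme,my-fan", },
	{ },
};
/* Emits MODULE_ALIAS("of:N*T*Cacme,my-fan") entries into modinfo,
 * which is what userspace module autoloading keys on. */
MODULE_DEVICE_TABLE(of, my_fan_match);

static int my_fan_probe(struct platform_device *pdev)
{
	return 0;	/* hardware setup elided */
}

static struct platform_driver my_fan_driver = {
	.probe = my_fan_probe,
	.driver = {
		.name		= "my-fan",
		.of_match_table	= of_match_ptr(my_fan_match),
	},
};
module_platform_driver(my_fan_driver);

MODULE_LICENSE("GPL");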
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 3dd2de3..472b882 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
@@ -51,6 +52,22 @@
 }
 
 #ifdef CONFIG_ACPI
+/*
+ * The HCNT/LCNT information coming from ACPI should be the most accurate
+ * for a given platform. However, some systems get it wrong. On such systems
+ * we get better results by calculating those based on the input clock.
+ */
+static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
+	{
+		.ident = "Dell Inspiron 7348",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
+		},
+	},
+	{ }
+};
+
 static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
 			       u16 *hcnt, u16 *lcnt, u32 *sda_hold)
 {
@@ -58,6 +75,9 @@
 	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
 	union acpi_object *obj;
 
+	if (dmi_check_system(dw_i2c_no_acpi_params))
+		return;
+
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
 		return;
 
@@ -253,12 +273,6 @@
 	adap->dev.parent = &pdev->dev;
 	adap->dev.of_node = pdev->dev.of_node;
 
-	r = i2c_add_numbered_adapter(adap);
-	if (r) {
-		dev_err(&pdev->dev, "failure adding adapter\n");
-		return r;
-	}
-
 	if (dev->pm_runtime_disabled) {
 		pm_runtime_forbid(&pdev->dev);
 	} else {
@@ -268,6 +282,13 @@
 		pm_runtime_enable(&pdev->dev);
 	}
 
+	r = i2c_add_numbered_adapter(adap);
+	if (r) {
+		dev_err(&pdev->dev, "failure adding adapter\n");
+		pm_runtime_disable(&pdev->dev);
+		return r;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d8361da..d8b5a8f 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -690,15 +690,16 @@
 		return ret;
 	}
 
+	pm_runtime_enable(dev);
+	platform_set_drvdata(pdev, priv);
+
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret < 0) {
 		dev_err(dev, "reg adap failed: %d\n", ret);
+		pm_runtime_disable(dev);
 		return ret;
 	}
 
-	pm_runtime_enable(dev);
-	platform_set_drvdata(pdev, priv);
-
 	dev_info(dev, "probed\n");
 
 	return 0;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 50bfd8c..5df8196 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1243,17 +1243,19 @@
 	i2c->adap.nr = i2c->pdata->bus_num;
 	i2c->adap.dev.of_node = pdev->dev.of_node;
 
+	platform_set_drvdata(pdev, i2c);
+
+	pm_runtime_enable(&pdev->dev);
+
 	ret = i2c_add_numbered_adapter(&i2c->adap);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
+		pm_runtime_disable(&pdev->dev);
 		s3c24xx_i2c_deregister_cpufreq(i2c);
 		clk_unprepare(i2c->clk);
 		return ret;
 	}
 
-	platform_set_drvdata(pdev, i2c);
-
-	pm_runtime_enable(&pdev->dev);
 	pm_runtime_enable(&i2c->adap.dev);
 
 	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
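
The designware, rcar and s3c2410 hunks above are all instances of one ordering bug: i2c_add_numbered_adapter() can synchronously probe client devices, and those probes may already need drvdata and runtime PM, so both must be set up before the adapter is registered and undone on its error path. The common shape, sketched as a hypothetical driver:

#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

struct my_i2c {
	struct i2c_adapter adap;
};

static int my_i2c_probe(struct platform_device *pdev)
{
	struct my_i2c *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Clients may probe from inside i2c_add_numbered_adapter(), so
	 * everything they rely on must already be in place. */
	platform_set_drvdata(pdev, priv);
	pm_runtime_enable(&pdev->dev);

	ret = i2c_add_numbered_adapter(&priv->adap);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to add adapter\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
	return 0;
}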
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 5f89f1e3..a59c311 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -694,12 +694,12 @@
 		goto err_clear_wakeup_irq;
 
 	status = dev_pm_domain_attach(&client->dev, true);
-	if (status != -EPROBE_DEFER) {
-		status = driver->probe(client, i2c_match_id(driver->id_table,
-					client));
-		if (status)
-			goto err_detach_pm_domain;
-	}
+	if (status == -EPROBE_DEFER)
+		goto err_clear_wakeup_irq;
+
+	status = driver->probe(client, i2c_match_id(driver->id_table, client));
+	if (status)
+		goto err_detach_pm_domain;
 
 	return 0;
 
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738f..cd4510a 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@
 		.name = "C6-SKL",
 		.desc = "MWAIT 0x20",
 		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 75,
+		.exit_latency = 85,
 		.target_residency = 200,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@
 		.name = "C8-SKL",
 		.desc = "MWAIT 0x40",
 		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
-		.exit_latency = 174,
+		.exit_latency = 200,
 		.target_residency = 800,
 		.enter = &intel_idle,
 		.enter_freeze = intel_idle_freeze, },
 	{
+		.name = "C9-SKL",
+		.desc = "MWAIT 0x50",
+		.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 480,
+		.target_residency = 5000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
 		.name = "C10-SKL",
 		.desc = "MWAIT 0x60",
 		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b1ab13f..59a2daf 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1232,14 +1232,32 @@
 	return true;
 }
 
+static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
+{
+	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
+	enum rdma_transport_type transport =
+		rdma_node_get_transport(device->node_type);
+
+	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;
+}
+
+static bool cma_protocol_roce(const struct rdma_cm_id *id)
+{
+	struct ib_device *device = id->device;
+	const int port_num = id->port_num ?: rdma_start_port(device);
+
+	return cma_protocol_roce_dev_port(device, port_num);
+}
+
 static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
 			      const struct net_device *net_dev)
 {
 	const struct rdma_addr *addr = &id_priv->id.route.addr;
 
 	if (!net_dev)
-		/* This request is an AF_IB request */
-		return addr->src_addr.ss_family == AF_IB;
+		/* This request is an AF_IB request or a RoCE request */
+		return addr->src_addr.ss_family == AF_IB ||
+		       cma_protocol_roce(&id_priv->id);
 
 	return !addr->dev_addr.bound_dev_if ||
 	       (net_eq(dev_net(net_dev), &init_net) &&
@@ -1294,6 +1312,10 @@
 		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
 			/* Assuming the protocol is AF_IB */
 			*net_dev = NULL;
+		} else if (cma_protocol_roce_dev_port(req.device, req.port)) {
+			/* TODO find the net dev matching the request parameters
+			 * through the RoCE GID table */
+			*net_dev = NULL;
 		} else {
 			return ERR_CAST(*net_dev);
 		}
@@ -1593,11 +1615,16 @@
 		if (ret)
 			goto err;
 	} else {
-		/* An AF_IB connection */
-		WARN_ON_ONCE(ss_family != AF_IB);
-
-		cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv),
-				 &rt->addr.dev_addr);
+		if (!cma_protocol_roce(listen_id) &&
+		    cma_any_addr(cma_src_addr(id_priv))) {
+			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
+			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
+			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
+		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
+			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
+			if (ret)
+				goto err;
+		}
 	}
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
@@ -1635,13 +1662,12 @@
 		if (ret)
 			goto err;
 	} else {
-		/* An AF_IB connection */
-		WARN_ON_ONCE(ss_family != AF_IB);
-
-		if (!cma_any_addr(cma_src_addr(id_priv)))
-			cma_translate_ib((struct sockaddr_ib *)
-						cma_src_addr(id_priv),
-					 &id->route.addr.dev_addr);
+		if (!cma_any_addr(cma_src_addr(id_priv))) {
+			ret = cma_translate_addr(cma_src_addr(id_priv),
+						 &id->route.addr.dev_addr);
+			if (ret)
+				goto err;
+		}
 	}
 
 	id_priv->state = RDMA_CM_CONNECT;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911..f1ccd40 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
 	if (MLX5_CAP_GEN(mdev, apm))
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
-	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
 	if (MLX5_CAP_GEN(mdev, xrc))
 		props->device_cap_flags |= IB_DEVICE_XRC;
 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@
 	return 0;
 }
 
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
-	struct mlx5_create_mkey_mbox_in *in;
-	struct mlx5_mkey_seg *seg;
-	struct mlx5_core_mr mr;
-	int err;
-
-	in = kzalloc(sizeof(*in), GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	seg = &in->seg;
-	seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
-	seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-	seg->start_addr = 0;
-
-	err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
-				    NULL, NULL, NULL);
-	if (err) {
-		mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
-		goto err_in;
-	}
-
-	kfree(in);
-	*key = mr.key;
-
-	return 0;
-
-err_in:
-	kfree(in);
-
-	return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
-	struct mlx5_core_mr mr;
-	int err;
-
-	memset(&mr, 0, sizeof(mr));
-	mr.key = key;
-	err = mlx5_core_destroy_mkey(dev->mdev, &mr);
-	if (err)
-		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
 				      struct ib_ucontext *context,
 				      struct ib_udata *udata)
@@ -867,13 +819,6 @@
 			kfree(pd);
 			return ERR_PTR(-EFAULT);
 		}
-	} else {
-		err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
-		if (err) {
-			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
-			kfree(pd);
-			return ERR_PTR(err);
-		}
 	}
 
 	return &pd->ibpd;
@@ -884,9 +829,6 @@
 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
 	struct mlx5_ib_pd *mpd = to_mpd(pd);
 
-	if (!pd->uobject)
-		free_pa_mkey(mdev, mpd->pa_lkey);
-
 	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
 	kfree(mpd);
 
@@ -1245,18 +1187,10 @@
 	struct ib_srq_init_attr attr;
 	struct mlx5_ib_dev *dev;
 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
-	u32 rsvd_lkey;
 	int ret = 0;
 
 	dev = container_of(devr, struct mlx5_ib_dev, devr);
 
-	ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
-	if (ret) {
-		pr_err("Failed to query special context %d\n", ret);
-		return ret;
-	}
-	dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
 	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
 	if (IS_ERR(devr->p0)) {
 		ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner		= THIS_MODULE;
 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
+	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
 	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
 	dev->ib_dev.phys_port_cnt     = dev->num_ports;
 	dev->ib_dev.num_comp_vectors    =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda7..22123b7 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@
 struct mlx5_ib_pd {
 	struct ib_pd		ibpd;
 	u32			pdn;
-	u32			pa_lkey;
 };
 
 /* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@
 	int			uuarn;
 
 	int			create_type;
-	u32			pa_lkey;
 
 	/* Store signature errors */
 	bool			signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c..6f521a3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@
 			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
 			if (err)
 				mlx5_ib_dbg(dev, "err %d\n", err);
-			else
-				qp->pa_lkey = to_mpd(pd)->pa_lkey;
 		}
 
 		if (err)
@@ -2045,7 +2043,7 @@
 		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
 	dseg->addr = cpu_to_be64(mfrpl->map);
 	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
-	dseg->lkey = cpu_to_be32(pd->pa_lkey);
+	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
 static __be32 send_ieth(struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 70acda9..6a0bdfa 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1325,9 +1325,6 @@
 		 "%u.%u", nesadapter->firmware_version >> 16,
 		 nesadapter->firmware_version & 0x000000ff);
 	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
-	drvinfo->testinfo_len = 0;
-	drvinfo->eedump_len = 0;
-	drvinfo->regdump_len = 0;
 }
 
 
diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h
index 5be13d8..f903502 100644
--- a/drivers/infiniband/hw/usnic/usnic.h
+++ b/drivers/infiniband/hw/usnic/usnic.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h
index 04a6622..7fe9502 100644
--- a/drivers/infiniband/hw/usnic/usnic_abi.h
+++ b/drivers/infiniband/hw/usnic/usnic_abi.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
index 3935672..596e0ed 100644
--- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
+++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h
index 9d737ed..b54986d 100644
--- a/drivers/infiniband/hw/usnic/usnic_common_util.h
+++ b/drivers/infiniband/hw/usnic/usnic_common_util.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index 5d13860..5e55b8b 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h
index 4087d24..98453e9 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index e3c9bd9..3c37dd5 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index 93713a2..3a8add9 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index e5a9297..525bf27 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 34c49b8..0c15bd8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index db3588d..85dc3f9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
index b0aafe8..b1458be 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 27dc67c..3412ea0 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
index 0d09b49..3d98e16 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 7df4382..f8e3211 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 0bd04ef..414eaa5 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h
index 75777a6..183fcb6 100644
--- a/drivers/infiniband/hw/usnic/usnic_log.h
+++ b/drivers/infiniband/hw/usnic/usnic_log.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index ddef6f77..de31838 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h
index 7e5dc6d..9a7a2d9 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.h
+++ b/drivers/infiniband/hw/usnic/usnic_transport.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index cb2337f..645a5f6 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -7,7 +7,7 @@
  * licenses.  You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * BSD license below:
  *
  *     Redistribution and use in source and binary forms, with or
  *     without modification, are permitted provided that the following
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 7044099..45ca7c1 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
index 3a4288e..42b4b4c 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
index d4f752e..c0b0b87 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c
index 656b88c..66de93f 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.c
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.c
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h
index 14d931a..a08423e 100644
--- a/drivers/infiniband/hw/usnic/usnic_vnic.h
+++ b/drivers/infiniband/hw/usnic/usnic_vnic.h
@@ -1,9 +1,24 @@
 /*
  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca28736..edc5b85 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@
 	IPOIB_NUM_WC		  = 4,
 
 	IPOIB_MAX_PATH_REC_QUEUE  = 3,
-	IPOIB_MAX_MCAST_QUEUE	  = 3,
+	IPOIB_MAX_MCAST_QUEUE	  = 64,
 
 	IPOIB_FLAG_OPER_UP	  = 0,
 	IPOIB_FLAG_INITIALIZED	  = 1,
@@ -495,6 +495,7 @@
 void ipoib_mcast_join_task(struct work_struct *work);
 void ipoib_mcast_carrier_on_task(struct work_struct *work);
 void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
+void ipoib_mcast_free(struct ipoib_mcast *mc);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
@@ -548,6 +549,8 @@
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
 		       union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
 
 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce..babba05 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@
 	unsigned long dt;
 	unsigned long flags;
 	int i;
+	LIST_HEAD(remove_list);
+	struct ipoib_mcast *mcast, *tmcast;
+	struct net_device *dev = priv->dev;
 
 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
 		return;
@@ -1176,6 +1179,19 @@
 							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* was the neigh idle for two GC periods */
 			if (time_after(neigh_obsolete, neigh->alive)) {
+				u8 *mgid = neigh->daddr + 4;
+
+				/* Is this multicast? */
+				if (*mgid == 0xff) {
+					mcast = __ipoib_mcast_find(dev, mgid);
+
+					if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+						list_del(&mcast->list);
+						rb_erase(&mcast->rb_node, &priv->multicast_tree);
+						list_add_tail(&mcast->list, &remove_list);
+					}
+				}
+
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
 									     lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,10 @@
 
 out_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
+	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
+		ipoib_mcast_leave(dev, mcast);
+		ipoib_mcast_free(mcast);
+	}
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748..d750a86 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -106,7 +106,7 @@
 		queue_delayed_work(priv->wq, &priv->mcast_task, 0);
 }
 
-static void ipoib_mcast_free(struct ipoib_mcast *mcast)
+void ipoib_mcast_free(struct ipoib_mcast *mcast)
 {
 	struct net_device *dev = mcast->dev;
 	int tx_dropped = 0;
@@ -153,7 +153,7 @@
 	return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@
 		rec.hop_limit	  = priv->broadcast->mcmember.hop_limit;
 
 		/*
-		 * Historically Linux IPoIB has never properly supported SEND
-		 * ONLY join. It emulated it by not providing all the required
-		 * attributes, which is enough to prevent group creation and
-		 * detect if there are full members or not. A major problem
-		 * with supporting SEND ONLY is detecting when the group is
-		 * auto-destroyed as IPoIB will cache the MLID..
+		 * Send-only IB Multicast joins do not work at the core
+		 * IB layer yet, so we can't use them here.  However,
+		 * we are emulating an Ethernet multicast send, which
+		 * does not require a multicast subscription and will
+		 * still send properly.  The most appropriate thing to
+		 * do is to create the group if it doesn't exist as that
+		 * most closely emulates the behavior, from a user space
+		 * application perspective, of Ethernet multicast
+		 * operation.  For now, we do a full join; maybe later,
+		 * when the core IB layers support send-only joins, we
+		 * will use them.
 		 */
-#if 1
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
 		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
 			rec.join_state = 4;
 #endif
@@ -675,7 +677,7 @@
 	return 0;
 }
 
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d8..f58ff96 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@
 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)");
 
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+		 "Always register memory, even for contiguous memory regions (default:true)");
+
 bool iser_pi_enable = false;
 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583..a5edd6e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
 extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
 
 int iser_assign_reg_ops(struct iser_device *device);
 
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc7..4c46d67 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@
 iser_reg_prot_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
 		 struct iser_mem_reg *reg)
 {
 	struct iser_device *device = task->iser_conn->ib_conn.device;
 
-	if (mem->dma_nents == 1)
+	if (use_dma_key)
 		return iser_reg_dma(device, mem, reg);
 
 	return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@
 iser_reg_data_sg(struct iscsi_iser_task *task,
 		 struct iser_data_buf *mem,
 		 struct iser_fr_desc *desc,
+		 bool use_dma_key,
 		 struct iser_mem_reg *reg)
 {
 	struct iser_device *device = task->iser_conn->ib_conn.device;
 
-	if (mem->dma_nents == 1)
+	if (use_dma_key)
 		return iser_reg_dma(device, mem, reg);
 
 	return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@
 	struct iser_mem_reg *reg = &task->rdma_reg[dir];
 	struct iser_mem_reg *data_reg;
 	struct iser_fr_desc *desc = NULL;
+	bool use_dma_key;
 	int err;
 
 	err = iser_handle_unaligned_buf(task, mem, dir);
 	if (unlikely(err))
 		return err;
 
-	if (mem->dma_nents != 1 ||
-	    scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+	use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+		       scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+	if (!use_dma_key) {
 		desc = device->reg_ops->reg_desc_get(ib_conn);
 		reg->mem_h = desc;
 	}
@@ -853,7 +858,7 @@
 	else
 		data_reg = &task->desc.data_reg;
 
-	err = iser_reg_data_sg(task, mem, desc, data_reg);
+	err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
 	if (unlikely(err))
 		goto err_reg;
 
@@ -866,7 +871,8 @@
 			if (unlikely(err))
 				goto err_reg;
 
-			err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+			err = iser_reg_prot_sg(task, mem, desc,
+					       use_dma_key, prot_reg);
 			if (unlikely(err))
 				goto err_reg;
 		}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1..85132d8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@
 			     (unsigned long)comp);
 	}
 
-	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-				   IB_ACCESS_REMOTE_WRITE |
-				   IB_ACCESS_REMOTE_READ);
-	if (IS_ERR(device->mr))
-		goto dma_mr_err;
+	if (!iser_always_reg) {
+		int access = IB_ACCESS_LOCAL_WRITE |
+			     IB_ACCESS_REMOTE_WRITE |
+			     IB_ACCESS_REMOTE_READ;
+
+		device->mr = ib_get_dma_mr(device->pd, access);
+		if (IS_ERR(device->mr))
+			goto dma_mr_err;
+	}
 
 	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
 				iser_event_handler);
@@ -147,7 +151,8 @@
 	return 0;
 
 handler_err:
-	ib_dereg_mr(device->mr);
+	if (device->mr)
+		ib_dereg_mr(device->mr);
 dma_mr_err:
 	for (i = 0; i < device->comps_used; i++)
 		tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@
 static void iser_free_device_ib_res(struct iser_device *device)
 {
 	int i;
-	BUG_ON(device->mr == NULL);
 
 	for (i = 0; i < device->comps_used; i++) {
 		struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@
 	}
 
 	(void)ib_unregister_event_handler(&device->event_handler);
-	(void)ib_dereg_mr(device->mr);
+	if (device->mr)
+		(void)ib_dereg_mr(device->mr);
 	ib_dealloc_pd(device->pd);
 
 	kfree(device->comps);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 403bd29..aa59037 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -238,8 +238,6 @@
 		rx_sg->lkey = device->pd->local_dma_lkey;
 	}
 
-	isert_conn->rx_desc_head = 0;
-
 	return 0;
 
 dma_map_fail:
@@ -634,7 +632,7 @@
 isert_init_conn(struct isert_conn *isert_conn)
 {
 	isert_conn->state = ISER_CONN_INIT;
-	INIT_LIST_HEAD(&isert_conn->accept_node);
+	INIT_LIST_HEAD(&isert_conn->node);
 	init_completion(&isert_conn->login_comp);
 	init_completion(&isert_conn->login_req_comp);
 	init_completion(&isert_conn->wait);
@@ -762,28 +760,15 @@
 	ret = isert_rdma_post_recvl(isert_conn);
 	if (ret)
 		goto out_conn_dev;
-	/*
-	 * Obtain the second reference now before isert_rdma_accept() to
-	 * ensure that any initiator generated REJECT CM event that occurs
-	 * asynchronously won't drop the last reference until the error path
-	 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
-	 * isert_free_conn() -> isert_put_conn() -> kref_put().
-	 */
-	if (!kref_get_unless_zero(&isert_conn->kref)) {
-		isert_warn("conn %p connect_release is running\n", isert_conn);
-		goto out_conn_dev;
-	}
 
 	ret = isert_rdma_accept(isert_conn);
 	if (ret)
 		goto out_conn_dev;
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
-	mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_lock(&isert_np->mutex);
+	list_add_tail(&isert_conn->node, &isert_np->accepted);
+	mutex_unlock(&isert_np->mutex);
 
-	isert_info("np %p: Allow accept_np to continue\n", np);
-	up(&isert_np->np_sem);
 	return 0;
 
 out_conn_dev:
@@ -831,13 +816,21 @@
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+	struct isert_np *isert_np = cma_id->context;
 
 	isert_info("conn %p\n", isert_conn);
 
 	mutex_lock(&isert_conn->mutex);
-	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
-		isert_conn->state = ISER_CONN_UP;
+	isert_conn->state = ISER_CONN_UP;
+	kref_get(&isert_conn->kref);
 	mutex_unlock(&isert_conn->mutex);
+
+	mutex_lock(&isert_np->mutex);
+	list_move_tail(&isert_conn->node, &isert_np->pending);
+	mutex_unlock(&isert_np->mutex);
+
+	isert_info("np %p: Allow accept_np to continue\n", isert_np);
+	up(&isert_np->sem);
 }
 
 static void
@@ -903,14 +896,14 @@
 
 	switch (event) {
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
-		isert_np->np_cm_id = NULL;
+		isert_np->cm_id = NULL;
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
-		isert_np->np_cm_id = isert_setup_id(isert_np);
-		if (IS_ERR(isert_np->np_cm_id)) {
+		isert_np->cm_id = isert_setup_id(isert_np);
+		if (IS_ERR(isert_np->cm_id)) {
 			isert_err("isert np %p setup id failed: %ld\n",
-				  isert_np, PTR_ERR(isert_np->np_cm_id));
-			isert_np->np_cm_id = NULL;
+				  isert_np, PTR_ERR(isert_np->cm_id));
+			isert_np->cm_id = NULL;
 		}
 		break;
 	default:
@@ -929,7 +922,7 @@
 	struct isert_conn *isert_conn;
 	bool terminating = false;
 
-	if (isert_np->np_cm_id == cma_id)
+	if (isert_np->cm_id == cma_id)
 		return isert_np_cma_handler(cma_id->context, event);
 
 	isert_conn = cma_id->qp->qp_context;
@@ -945,13 +938,13 @@
 	if (terminating)
 		goto out;
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (!list_empty(&isert_conn->accept_node)) {
-		list_del_init(&isert_conn->accept_node);
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_conn->node)) {
+		list_del_init(&isert_conn->node);
 		isert_put_conn(isert_conn);
 		queue_work(isert_release_wq, &isert_conn->release_work);
 	}
-	mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_unlock(&isert_np->mutex);
 
 out:
 	return 0;
@@ -962,6 +955,7 @@
 {
 	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
+	list_del_init(&isert_conn->node);
 	isert_conn->cm_id = NULL;
 	isert_put_conn(isert_conn);
 
@@ -1006,35 +1000,51 @@
 }
 
 static int
-isert_post_recv(struct isert_conn *isert_conn, u32 count)
+isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 {
 	struct ib_recv_wr *rx_wr, *rx_wr_failed;
 	int i, ret;
-	unsigned int rx_head = isert_conn->rx_desc_head;
 	struct iser_rx_desc *rx_desc;
 
 	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
-		rx_desc		= &isert_conn->rx_descs[rx_head];
-		rx_wr->wr_id	= (uintptr_t)rx_desc;
-		rx_wr->sg_list	= &rx_desc->rx_sg;
-		rx_wr->num_sge	= 1;
-		rx_wr->next	= rx_wr + 1;
-		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
+		rx_desc = &isert_conn->rx_descs[i];
+		rx_wr->wr_id = (uintptr_t)rx_desc;
+		rx_wr->sg_list = &rx_desc->rx_sg;
+		rx_wr->num_sge = 1;
+		rx_wr->next = rx_wr + 1;
 	}
-
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
 
 	isert_conn->post_recv_buf_count += count;
 	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
-				&rx_wr_failed);
+			   &rx_wr_failed);
 	if (ret) {
 		isert_err("ib_post_recv() failed with ret: %d\n", ret);
 		isert_conn->post_recv_buf_count -= count;
-	} else {
-		isert_dbg("Posted %d RX buffers\n", count);
-		isert_conn->rx_desc_head = rx_head;
 	}
+
+	return ret;
+}
+
+static int
+isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+{
+	struct ib_recv_wr *rx_wr_failed, rx_wr;
+	int ret;
+
+	rx_wr.wr_id = (uintptr_t)rx_desc;
+	rx_wr.sg_list = &rx_desc->rx_sg;
+	rx_wr.num_sge = 1;
+	rx_wr.next = NULL;
+
+	isert_conn->post_recv_buf_count++;
+	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+	if (ret) {
+		isert_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_conn->post_recv_buf_count--;
+	}
+
 	return ret;
 }
 
@@ -1205,7 +1215,8 @@
 			if (ret)
 				return ret;
 
-			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
+			ret = isert_post_recvm(isert_conn,
+					       ISERT_QP_MAX_RECV_DTOS);
 			if (ret)
 				return ret;
 
@@ -1278,7 +1289,7 @@
 }
 
 static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn)
+*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
 {
 	struct isert_conn *isert_conn = conn->context;
 	struct isert_cmd *isert_cmd;
@@ -1292,6 +1303,7 @@
 	isert_cmd = iscsit_priv_cmd(cmd);
 	isert_cmd->conn = isert_conn;
 	isert_cmd->iscsi_cmd = cmd;
+	isert_cmd->rx_desc = rx_desc;
 
 	return cmd;
 }
@@ -1303,9 +1315,9 @@
 {
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
-	struct scatterlist *sg;
 	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
 	bool dump_payload = false;
+	unsigned int data_len;
 
 	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
 	if (rc < 0)
@@ -1314,7 +1326,10 @@
 	imm_data = cmd->immediate_data;
 	imm_data_len = cmd->first_burst_len;
 	unsol_data = cmd->unsolicited_data;
+	data_len = cmd->se_cmd.data_length;
 
+	if (imm_data && imm_data_len == data_len)
+		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
 	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
 	if (rc < 0) {
 		return 0;
@@ -1326,13 +1341,20 @@
 	if (!imm_data)
 		return 0;
 
-	sg = &cmd->se_cmd.t_data_sg[0];
-	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
-
-	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
-		  sg, sg_nents, &rx_desc->data[0], imm_data_len);
-
-	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
+	if (imm_data_len != data_len) {
+		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
+		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
+				    &rx_desc->data[0], imm_data_len);
+		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
+			  sg_nents, imm_data_len);
+	} else {
+		sg_init_table(&isert_cmd->sg, 1);
+		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
+		cmd->se_cmd.t_data_nents = 1;
+		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+		isert_dbg("Transfer Immediate imm_data_len: %d\n",
+			  imm_data_len);
+	}
 
 	cmd->write_data_done += imm_data_len;
 
@@ -1407,6 +1429,15 @@
 	if (rc < 0)
 		return rc;
 
+	/*
+	 * Multiple data-outs on the same command can arrive,
+	 * so post the buffer beforehand.
+	 */
+	rc = isert_post_recv(isert_conn, rx_desc);
+	if (rc) {
+		isert_err("ib_post_recv failed with %d\n", rc);
+		return rc;
+	}
 	return 0;
 }
 
@@ -1479,7 +1510,7 @@
 
 	switch (opcode) {
 	case ISCSI_OP_SCSI_CMD:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1493,7 +1524,7 @@
 					rx_desc, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_NOOP_OUT:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1506,7 +1537,7 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
@@ -1514,22 +1545,20 @@
 						(unsigned char *)hdr);
 		break;
 	case ISCSI_OP_LOGOUT:
-		cmd = isert_allocate_cmd(conn);
+		cmd = isert_allocate_cmd(conn, rx_desc);
 		if (!cmd)
 			break;
 
 		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
 		break;
 	case ISCSI_OP_TEXT:
-		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
 			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
-			if (!cmd)
-				break;
-		} else {
-			cmd = isert_allocate_cmd(conn);
-			if (!cmd)
-				break;
-		}
+		else
+			cmd = isert_allocate_cmd(conn, rx_desc);
+
+		if (!cmd)
+			break;
 
 		isert_cmd = iscsit_priv_cmd(cmd);
 		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1589,7 +1618,7 @@
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
 	struct iscsi_hdr *hdr;
 	u64 rx_dma;
-	int rx_buflen, outstanding;
+	int rx_buflen;
 
 	if ((char *)desc == isert_conn->login_req_buf) {
 		rx_dma = isert_conn->login_req_dma;
@@ -1629,22 +1658,6 @@
 				      DMA_FROM_DEVICE);
 
 	isert_conn->post_recv_buf_count--;
-	isert_dbg("Decremented post_recv_buf_count: %d\n",
-		  isert_conn->post_recv_buf_count);
-
-	if ((char *)desc == isert_conn->login_req_buf)
-		return;
-
-	outstanding = isert_conn->post_recv_buf_count;
-	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
-		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
-				ISERT_MIN_POSTED_RX);
-		err = isert_post_recv(isert_conn, count);
-		if (err) {
-			isert_err("isert_post_recv() count: %d failed, %d\n",
-			       count, err);
-		}
-	}
 }
 
 static int
@@ -2156,6 +2169,12 @@
 	struct ib_send_wr *wr_failed;
 	int ret;
 
+	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+	if (ret) {
+		isert_err("ib_post_recv failed with %d\n", ret);
+		return ret;
+	}
+
 	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
 			   &wr_failed);
 	if (ret) {
@@ -2950,6 +2969,12 @@
 				   &isert_cmd->tx_desc.send_wr);
 		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
 		wr->send_wr_num += 1;
+
+		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+		if (rc) {
+			isert_err("ib_post_recv failed with %d\n", rc);
+			return rc;
+		}
 	}
 
 	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
@@ -2999,9 +3024,16 @@
 static int
 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
-	int ret;
+	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+	int ret = 0;
 
 	switch (state) {
+	case ISTATE_REMOVE:
+		spin_lock_bh(&conn->cmd_lock);
+		list_del_init(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+		isert_put_cmd(isert_cmd, true);
+		break;
 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
 		ret = isert_put_nopin(cmd, conn, false);
 		break;
@@ -3106,10 +3138,10 @@
 		isert_err("Unable to allocate struct isert_np\n");
 		return -ENOMEM;
 	}
-	sema_init(&isert_np->np_sem, 0);
-	mutex_init(&isert_np->np_accept_mutex);
-	INIT_LIST_HEAD(&isert_np->np_accept_list);
-	init_completion(&isert_np->np_login_comp);
+	sema_init(&isert_np->sem, 0);
+	mutex_init(&isert_np->mutex);
+	INIT_LIST_HEAD(&isert_np->accepted);
+	INIT_LIST_HEAD(&isert_np->pending);
 	isert_np->np = np;
 
 	/*
@@ -3125,7 +3157,7 @@
 		goto out;
 	}
 
-	isert_np->np_cm_id = isert_lid;
+	isert_np->cm_id = isert_lid;
 	np->np_context = isert_np;
 
 	return 0;
@@ -3214,7 +3246,7 @@
 	int ret;
 
 accept_wait:
-	ret = down_interruptible(&isert_np->np_sem);
+	ret = down_interruptible(&isert_np->sem);
 	if (ret)
 		return -ENODEV;
 
@@ -3231,15 +3263,15 @@
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (list_empty(&isert_np->np_accept_list)) {
-		mutex_unlock(&isert_np->np_accept_mutex);
+	mutex_lock(&isert_np->mutex);
+	if (list_empty(&isert_np->pending)) {
+		mutex_unlock(&isert_np->mutex);
 		goto accept_wait;
 	}
-	isert_conn = list_first_entry(&isert_np->np_accept_list,
-			struct isert_conn, accept_node);
-	list_del_init(&isert_conn->accept_node);
-	mutex_unlock(&isert_np->np_accept_mutex);
+	isert_conn = list_first_entry(&isert_np->pending,
+			struct isert_conn, node);
+	list_del_init(&isert_conn->node);
+	mutex_unlock(&isert_np->mutex);
 
 	conn->context = isert_conn;
 	isert_conn->conn = conn;
@@ -3257,28 +3289,39 @@
 	struct isert_np *isert_np = np->np_context;
 	struct isert_conn *isert_conn, *n;
 
-	if (isert_np->np_cm_id)
-		rdma_destroy_id(isert_np->np_cm_id);
+	if (isert_np->cm_id)
+		rdma_destroy_id(isert_np->cm_id);
 
 	/*
 	 * FIXME: At this point we don't have a good way to ensure
 	 * that at this point we don't have hanging connections that
 	 * completed RDMA establishment but didn't start iscsi login
 	 * process. So work around this by cleaning up whatever piled
-	 * up in np_accept_list.
+	 * up in accepted and pending lists.
 	 */
-	mutex_lock(&isert_np->np_accept_mutex);
-	if (!list_empty(&isert_np->np_accept_list)) {
-		isert_info("Still have isert connections, cleaning up...\n");
+	mutex_lock(&isert_np->mutex);
+	if (!list_empty(&isert_np->pending)) {
+		isert_info("Still have isert pending connections\n");
 		list_for_each_entry_safe(isert_conn, n,
-					 &isert_np->np_accept_list,
-					 accept_node) {
+					 &isert_np->pending,
+					 node) {
 			isert_info("cleaning isert_conn %p state (%d)\n",
 				   isert_conn, isert_conn->state);
 			isert_connect_release(isert_conn);
 		}
 	}
-	mutex_unlock(&isert_np->np_accept_mutex);
+
+	if (!list_empty(&isert_np->accepted)) {
+		isert_info("Still have isert accepted connections\n");
+		list_for_each_entry_safe(isert_conn, n,
+					 &isert_np->accepted,
+					 node) {
+			isert_info("cleaning isert_conn %p state (%d)\n",
+				   isert_conn, isert_conn->state);
+			isert_connect_release(isert_conn);
+		}
+	}
+	mutex_unlock(&isert_np->mutex);
 
 	np->np_context = NULL;
 	kfree(isert_np);
@@ -3345,6 +3388,41 @@
 	wait_for_completion(&isert_conn->wait_comp_err);
 }
 
+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ *     unsolicited dataout
+ * @conn:    iscsi connection
+ *
+ * We might still have commands that are waiting for unsolicited
+ * dataout messages. We must put the extra reference on those
+ * before blocking on target_wait_for_session_cmds().
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+{
+	struct iscsi_cmd *cmd, *tmp;
+	static LIST_HEAD(drop_cmd_list);
+
+	spin_lock_bh(&conn->cmd_lock);
+	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+		    (cmd->write_data_done < cmd->se_cmd.data_length))
+			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+	}
+	spin_unlock_bh(&conn->cmd_lock);
+
+	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+		list_del_init(&cmd->i_conn_node);
+		if (cmd->i_state != ISTATE_REMOVE) {
+			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+			isert_info("conn %p dropping cmd %p\n", conn, cmd);
+			isert_put_cmd(isert_cmd, true);
+		}
+	}
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
@@ -3363,8 +3441,9 @@
 	isert_conn_terminate(isert_conn);
 	mutex_unlock(&isert_conn->mutex);
 
-	isert_wait4cmds(conn);
 	isert_wait4flush(isert_conn);
+	isert_put_unsol_pending_cmds(conn);
+	isert_wait4cmds(conn);
 	isert_wait4logout(isert_conn);
 
 	queue_work(isert_release_wq, &isert_conn->release_work);
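
The new isert_put_unsol_pending_cmds() above uses the collect-then-release
idiom: matching commands are moved onto a private list while conn->cmd_lock
is held, and the per-command teardown runs only after the lock is dropped.
A minimal sketch of the idiom, with a hypothetical needs_drop() predicate
standing in for the three-part unsolicited-dataout test:

	LIST_HEAD(drop_list);
	struct iscsi_cmd *cmd, *tmp;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node)
		if (needs_drop(cmd))	/* hypothetical predicate */
			list_move_tail(&cmd->i_conn_node, &drop_list);
	spin_unlock_bh(&conn->cmd_lock);

	/* the entries are now private to this thread; no lock needed */
	list_for_each_entry_safe(cmd, tmp, &drop_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		isert_put_cmd(iscsit_priv_cmd(cmd), true);
	}

The reordering in isert_wait_conn() matters too: flushes are waited for
first, the stale dataout references are put, and only then does
isert_wait4cmds() block, so commands stuck waiting for dataout can no
longer stall the wait forever.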
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6a04ba3..c5b99bc 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -113,7 +113,6 @@
 };
 
 struct isert_rdma_wr {
-	struct list_head	wr_list;
 	struct isert_cmd	*isert_cmd;
 	enum iser_ib_op_code	iser_ib_op;
 	struct ib_sge		*ib_sge;
@@ -134,14 +133,13 @@
 	uint64_t		write_va;
 	u64			pdu_buf_dma;
 	u32			pdu_buf_len;
-	u32			read_va_off;
-	u32			write_va_off;
-	u32			rdma_wr_num;
 	struct isert_conn	*conn;
 	struct iscsi_cmd	*iscsi_cmd;
 	struct iser_tx_desc	tx_desc;
+	struct iser_rx_desc	*rx_desc;
 	struct isert_rdma_wr	rdma_wr;
 	struct work_struct	comp_work;
+	struct scatterlist	sg;
 };
 
 struct isert_device;
@@ -159,11 +157,10 @@
 	u64			login_req_dma;
 	int			login_req_len;
 	u64			login_rsp_dma;
-	unsigned int		rx_desc_head;
 	struct iser_rx_desc	*rx_descs;
-	struct ib_recv_wr	rx_wr[ISERT_MIN_POSTED_RX];
+	struct ib_recv_wr	rx_wr[ISERT_QP_MAX_RECV_DTOS];
 	struct iscsi_conn	*conn;
-	struct list_head	accept_node;
+	struct list_head	node;
 	struct completion	login_comp;
 	struct completion	login_req_comp;
 	struct iser_tx_desc	login_tx_desc;
@@ -222,9 +219,9 @@
 
 struct isert_np {
 	struct iscsi_np         *np;
-	struct semaphore	np_sem;
-	struct rdma_cm_id	*np_cm_id;
-	struct mutex		np_accept_mutex;
-	struct list_head	np_accept_list;
-	struct completion	np_login_comp;
+	struct semaphore	sem;
+	struct rdma_cm_id	*cm_id;
+	struct mutex		mutex;
+	struct list_head	accepted;
+	struct list_head	pending;
 };
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471..4215b53 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@
 config JOYSTICK_ZHENHUA
 	tristate "5-byte Zhenhua RC transmitter"
 	select SERIO
+	select BITREVERSE
 	help
 	  Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
 	  supplied with ready-to-fly micro electric indoor helicopters
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index b76ac58..a8bc2fe 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -150,7 +150,7 @@
 		if (w->counter == 24) {	/* full frame */
 			walkera0701_parse_frame(w);
 			w->counter = NO_SYNC;
-			if (abs(pulse_time - SYNC_PULSE) < RESERVE)	/* new frame sync */
+			if (abs64(pulse_time - SYNC_PULSE) < RESERVE)	/* new frame sync */
 				w->counter = 0;
 		} else {
 			if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@
 			} else
 				w->counter = NO_SYNC;
 		}
-	} else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+	} else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
 				RESERVE + BIN1_PULSE - BIN0_PULSE)	/* frame sync .. */
 		w->counter = 0;
 
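Note on abs() vs abs64(): the pulse-time delta here is a 64-bit quantity,
and the abs() macro of this era evaluated its argument through a 32-bit
temporary on 32-bit builds, silently truncating the upper half before
taking the absolute value; abs64() keeps the full s64 range. A sketch:

	s64 delta = pulse_time - SYNC_PULSE;
	s64 bad  = abs(delta);   /* may truncate to 32 bits first */
	s64 good = abs64(delta); /* computed on the full 64-bit value */
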
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b052afe..6639b2b 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -266,7 +266,7 @@
 
 	error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
 	if (error)
-		return error;
+		goto err_free_keypad;
 
 	res = request_mem_region(res->start, resource_size(res), pdev->name);
 	if (!res) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 867db8a..e317b75 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -93,7 +93,7 @@
 	default:
 		reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
 		break;
-	};
+	}
 
 	error = regmap_update_bits(pwrkey->regmap,
 				   pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 345df9b..5adbced 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -414,7 +414,7 @@
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	for_each_set_bit(i, dev->absbit, ABS_CNT) {
+	for (i = 0; i < ABS_CNT; i++) {
 		input_abs_set_max(dev, i, user_dev->absmax[i]);
 		input_abs_set_min(dev, i, user_dev->absmin[i]);
 		input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
index 5f19107..e4eb048 100644
--- a/drivers/input/mouse/cyapa_gen6.c
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -241,14 +241,10 @@
 	memcpy(&cyapa->product_id[13], &resp_data[62], 2);
 	cyapa->product_id[15] = '\0';
 
+	/* Get the number of Rx electrodes. */
 	rotat_align = resp_data[68];
-	if (rotat_align) {
-		cyapa->electrodes_rx = cyapa->electrodes_y;
-		cyapa->electrodes_rx = cyapa->electrodes_y;
-	} else {
-		cyapa->electrodes_rx = cyapa->electrodes_x;
-		cyapa->electrodes_rx = cyapa->electrodes_y;
-	}
+	cyapa->electrodes_rx =
+		rotat_align ? cyapa->electrodes_y : cyapa->electrodes_x;
 	cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
 
 	if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 73670f2..c0ec261 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -60,7 +60,7 @@
 	int (*get_sm_version)(struct i2c_client *client,
 			      u8* ic_type, u8 *version);
 	int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
-	int (*get_product_id)(struct i2c_client *client, u8 *id);
+	int (*get_product_id)(struct i2c_client *client, u16 *id);
 
 	int (*get_max)(struct i2c_client *client,
 		       unsigned int *max_x, unsigned int *max_y);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa94530..5e1665b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME		"elan_i2c"
-#define ELAN_DRIVER_VERSION	"1.6.0"
+#define ELAN_DRIVER_VERSION	"1.6.1"
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -76,7 +76,7 @@
 	unsigned int		x_res;
 	unsigned int		y_res;
 
-	u8			product_id;
+	u16			product_id;
 	u8			fw_version;
 	u8			sm_version;
 	u8			iap_version;
@@ -98,15 +98,25 @@
 			   u16 *signature_address)
 {
 	switch (iap_version) {
+	case 0x00:
+	case 0x06:
 	case 0x08:
 		*validpage_count = 512;
 		break;
+	case 0x03:
+	case 0x07:
 	case 0x09:
+	case 0x0A:
+	case 0x0B:
+	case 0x0C:
 		*validpage_count = 768;
 		break;
 	case 0x0D:
 		*validpage_count = 896;
 		break;
+	case 0x0E:
+		*validpage_count = 640;
+		break;
 	default:
 		/* unknown ic type clear value */
 		*validpage_count = 0;
@@ -266,11 +276,10 @@
 
 	error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
 				&data->fw_signature_address);
-	if (error) {
-		dev_err(&data->client->dev,
-			"unknown iap version %d\n", data->iap_version);
-		return error;
-	}
+	if (error)
+		dev_warn(&data->client->dev,
+			 "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+			 data->iap_version, data->ic_type);
 
 	return 0;
 }
@@ -486,6 +495,9 @@
 	const u8 *fw_signature;
 	static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
 
+	if (data->fw_validpage_count == 0)
+		return -EINVAL;
+
 	/* Look for a firmware with the product id appended. */
 	fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
 	if (!fw_name) {
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 683c840..a679e56 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -276,7 +276,7 @@
 	return 0;
 }
 
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
 {
 	int error;
 	u8 val[3];
@@ -287,7 +287,7 @@
 		return error;
 	}
 
-	*id = val[0];
+	*id = le16_to_cpup((__le16 *)val);
 	return 0;
 }
 
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index ff36a36..cb6aecb 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -183,7 +183,7 @@
 	return 0;
 }
 
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
 	int error;
 	u8 val[3];
@@ -195,7 +195,7 @@
 		return error;
 	}
 
-	*id = val[1];
+	*id = be16_to_cpup((__be16 *)val);
 	return 0;
 }
 
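Widening the product id to u16 changes how the raw response bytes are
decoded, and the two transports disagree on byte order: the I2C path takes
the value little-endian, the SMBus path big-endian, hence le16_to_cpup()
in one and be16_to_cpup() in the other. The decode, assuming val[] holds
the raw bytes as in both hunks:

	u8 val[3];
	u16 id;

	id = le16_to_cpup((__le16 *)val);	/* I2C: LSB first */
	id = be16_to_cpup((__be16 *)val);	/* SMBus: MSB first */

(get_unaligned_le16()/get_unaligned_be16() would express the same thing
without the pointer casts, should alignment ever be a concern.)
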
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 994ae78..6025eb4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,18 +519,14 @@
 	struct synaptics_data *priv = psmouse->private;
 
 	priv->mode = 0;
-
-	if (priv->absolute_mode) {
+	if (priv->absolute_mode)
 		priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-		if (SYN_CAP_EXTENDED(priv->capabilities))
-			priv->mode |= SYN_BIT_W_MODE;
-	}
-
-	if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+	if (priv->disable_gesture)
 		priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
 	if (psmouse->rate >= 80)
 		priv->mode |= SYN_BIT_HIGH_RATE;
+	if (SYN_CAP_EXTENDED(priv->capabilities))
+		priv->mode |= SYN_BIT_W_MODE;
 
 	if (synaptics_mode_cmd(psmouse, priv->mode))
 		return -1;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 7551699..316f2c8 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -212,12 +212,17 @@
 	 * time before the ACK arrives.
 	 */
 	if (ps2_sendbyte(ps2dev, command & 0xff,
-			 command == PS2_CMD_RESET_BAT ? 1000 : 200))
-		goto out;
+			 command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+		serio_pause_rx(ps2dev->serio);
+		goto out_reset_flags;
+	}
 
-	for (i = 0; i < send; i++)
-		if (ps2_sendbyte(ps2dev, param[i], 200))
-			goto out;
+	for (i = 0; i < send; i++) {
+		if (ps2_sendbyte(ps2dev, param[i], 200)) {
+			serio_pause_rx(ps2dev->serio);
+			goto out_reset_flags;
+		}
+	}
 
 	/*
 	 * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@
 				   !(ps2dev->flags & PS2_FLAG_CMD), timeout);
 	}
 
+	serio_pause_rx(ps2dev->serio);
+
 	if (param)
 		for (i = 0; i < receive; i++)
 			param[i] = ps2dev->cmdbuf[(receive - 1) - i];
 
 	if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
-		goto out;
+		goto out_reset_flags;
 
 	rc = 0;
 
- out:
-	serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
 	ps2dev->flags = 0;
 	serio_continue_rx(ps2dev->serio);
 
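The libps2 rework closes a window around ps2dev->cmdbuf: the buffer is
filled from the serio receive path, so it must only be read, and the
command flags only cleared, while reception is paused. The early-error
paths now pause RX themselves before jumping to the common exit, which no
longer does so. The resulting shape:

	serio_pause_rx(ps2dev->serio);		/* shut out the RX handler */

	for (i = 0; i < receive; i++)
		param[i] = ps2dev->cmdbuf[(receive - 1) - i];
	/* ... */
 out_reset_flags:
	ps2dev->flags = 0;			/* still under the pause */
	serio_continue_rx(ps2dev->serio);
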
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 26b4593..1e8cd6f 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -194,6 +194,7 @@
 	parkbd_port = parkbd_allocate_serio();
 	if (!parkbd_port) {
 		parport_release(parkbd_dev);
+		parport_unregister_device(parkbd_dev);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 0f5f968..04edc8f 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -668,18 +668,22 @@
 
 static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
 {
+	int value;
 	struct spi_transfer *t =
 		list_entry(m->transfers.prev, struct spi_transfer, transfer_list);
 
 	if (ts->model == 7845) {
-		return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3;
+		value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
 	} else {
 		/*
 		 * adjust:  on-wire is a must-ignore bit, a BE12 value, then
 		 * padding; built from two 8 bit values written msb-first.
 		 */
-		return be16_to_cpup((__be16 *)t->rx_buf) >> 3;
+		value = be16_to_cpup((__be16 *)t->rx_buf);
 	}
+
+	/* enforce that the ADC output is 12 bits wide */
+	return (value >> 3) & 0xfff;
 }
 
 static void ads7846_update_value(struct spi_message *m, int val)
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ff0b758..8275267 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -94,7 +94,7 @@
  * The TSC module needs the ADC to get the measured value, so
  * before configuring the TSC we should initialize the ADC module.
  */
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 {
 	int adc_hc = 0;
 	int adc_gc;
@@ -122,17 +122,23 @@
 
 	timeout = wait_for_completion_timeout
 			(&tsc->completion, ADC_TIMEOUT);
-	if (timeout == 0)
+	if (timeout == 0) {
 		dev_err(tsc->dev, "Timeout for adc calibration\n");
+		return -ETIMEDOUT;
+	}
 
 	adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
-	if (adc_gs & ADC_CALF)
+	if (adc_gs & ADC_CALF) {
 		dev_err(tsc->dev, "ADC calibration failed\n");
+		return -EINVAL;
+	}
 
 	/* TSC need the ADC work in hardware trigger */
 	adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
 	adc_cfg |= ADC_HARDWARE_TRIGGER;
 	writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+	return 0;
 }
 
 /*
@@ -188,11 +194,17 @@
 	writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
 }
 
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
 {
-	imx6ul_adc_init(tsc);
+	int err;
+
+	err = imx6ul_adc_init(tsc);
+	if (err)
+		return err;
 	imx6ul_tsc_channel_config(tsc);
 	imx6ul_tsc_set(tsc);
+
+	return 0;
 }
 
 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@
 		return err;
 	}
 
-	imx6ul_tsc_init(tsc);
-
-	return 0;
+	return imx6ul_tsc_init(tsc);
 }
 
 static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@
 	int tsc_irq;
 	int adc_irq;
 
-	tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+	tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
 	if (!tsc)
 		return -ENOMEM;
 
@@ -345,7 +355,7 @@
 	if (!input_dev)
 		return -ENOMEM;
 
-	input_dev->name = "iMX6UL TouchScreen Controller";
+	input_dev->name = "iMX6UL Touchscreen Controller";
 	input_dev->id.bustype = BUS_HOST;
 
 	input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@
 	}
 
 	adc_irq = platform_get_irq(pdev, 1);
-	if (adc_irq <= 0) {
+	if (adc_irq < 0) {
 		dev_err(&pdev->dev, "no adc irq resource?\n");
 		return adc_irq;
 	}
@@ -491,7 +501,7 @@
 			goto out;
 		}
 
-		imx6ul_tsc_init(tsc);
+		retval = imx6ul_tsc_init(tsc);
 	}
 
 out:
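
The imx6ul_tsc conversion is the usual void-to-int error-propagation
pattern: once the lowest stage (the ADC calibration) can fail, every
caller up the chain must return the error rather than carry on against
dead hardware. A minimal sketch with hypothetical foo_* names:

	static int foo_hw_init(struct foo *f)
	{
		int err;

		err = foo_adc_init(f);	/* was void; now -ETIMEDOUT/-EINVAL */
		if (err)
			return err;

		foo_channel_config(f);	/* cannot fail, stays void */
		foo_set(f);
		return 0;
	}

The open and resume paths then simply return (or record) the value of
foo_hw_init(), as the last two hunks above do with imx6ul_tsc_init().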
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 7cce876..1fafc9f 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -394,12 +394,12 @@
 	if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
 		dev_err(dev, "failed to get x-size property\n");
 		return NULL;
-	};
+	}
 
 	if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
 		dev_err(dev, "failed to get y-size property\n");
 		return NULL;
-	};
+	}
 
 	of_property_read_u32(np, "contact-threshold",
 				&pdata->contact_threshold);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a..cbe6a89 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -23,8 +23,7 @@
 config IOMMU_IO_PGTABLE_LPAE
 	bool "ARMv7/v8 Long Descriptor Format"
 	select IOMMU_IO_PGTABLE
-	# SWIOTLB guarantees a dma_to_phys() implementation
-	depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
+	depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST)
 	help
 	  Enable support for the ARM long descriptor pagetable format.
 	  This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -43,7 +42,7 @@
 endmenu
 
 config IOMMU_IOVA
-	bool
+	tristate
 
 config OF_IOMMU
        def_bool y
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index f82060e7..08d2775 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2006,6 +2006,15 @@
 {
 	struct amd_iommu *iommu;
 
+	/*
+	 * First check if the device is still attached. It might already
+	 * be detached from its domain because the generic
+	 * iommu_detach_group code detached it and we try again here in
+	 * our alias handling.
+	 */
+	if (!dev_data->domain)
+		return;
+
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 
 	/* decrease reference counters */
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5ef347a..1b066e7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1256,6 +1256,9 @@
 	if (!iommu->dev)
 		return -ENODEV;
 
+	/* Prevent binding other PCI device drivers to IOMMU devices */
+	iommu->dev->match_driver = false;
+
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
 			      &iommu->cap);
 	pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index dafaf59..286e890 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -56,6 +56,7 @@
 #define IDR0_TTF_SHIFT			2
 #define IDR0_TTF_MASK			0x3
 #define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
+#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
 #define IDR0_S1P			(1 << 1)
 #define IDR0_S2P			(1 << 0)
 
@@ -342,7 +343,8 @@
 #define CMDQ_TLBI_0_VMID_SHIFT		32
 #define CMDQ_TLBI_0_ASID_SHIFT		48
 #define CMDQ_TLBI_1_LEAF		(1UL << 0)
-#define CMDQ_TLBI_1_ADDR_MASK		~0xfffUL
+#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
+#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL
 
 #define CMDQ_PRI_0_SSID_SHIFT		12
 #define CMDQ_PRI_0_SSID_MASK		0xfffffUL
@@ -770,11 +772,13 @@
 		break;
 	case CMDQ_OP_TLBI_NH_VA:
 		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
-		/* Fallthrough */
+		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
+		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
+		break;
 	case CMDQ_OP_TLBI_S2_IPA:
 		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
 		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
-		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK;
+		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
 		break;
 	case CMDQ_OP_TLBI_NH_ASID:
 		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
@@ -2460,7 +2464,13 @@
 	}
 
 	/* We only support the AArch64 table format at present */
-	if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) {
+	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
+	case IDR0_TTF_AARCH32_64:
+		smmu->ias = 40;
+		/* Fallthrough */
+	case IDR0_TTF_AARCH64:
+		break;
+	default:
 		dev_err(smmu->dev, "AArch64 table format not supported!\n");
 		return -ENXIO;
 	}
@@ -2541,8 +2551,7 @@
 		dev_warn(smmu->dev,
 			 "failed to set DMA mask for table walker\n");
 
-	if (!smmu->ias)
-		smmu->ias = smmu->oas;
+	smmu->ias = max(smmu->ias, smmu->oas);
 
 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
 		 smmu->ias, smmu->oas, smmu->features);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a..35365f0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2301,6 +2301,7 @@
 
 	if (ret) {
 		spin_unlock_irqrestore(&device_domain_lock, flags);
+		free_devinfo_mem(info);
 		return NULL;
 	}
 
@@ -3215,6 +3216,8 @@
 
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);
 
 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
@@ -3711,7 +3714,7 @@
 static int __init iommu_init_mempool(void)
 {
 	int ret;
-	ret = iommu_iova_cache_init();
+	ret = iova_cache_get();
 	if (ret)
 		return ret;
 
@@ -3725,7 +3728,7 @@
 
 	kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 
 	return -ENOMEM;
 }
@@ -3734,7 +3737,7 @@
 {
 	kmem_cache_destroy(iommu_devinfo_cache);
 	kmem_cache_destroy(iommu_domain_cache);
-	iommu_iova_cache_destroy();
+	iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
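
Two independent intel-iommu fixes. The first plugs a leak: the error path
unlocked and returned without freeing the freshly allocated devinfo. The
second pairs with the iova.c rework further down: alloc_iova() no longer
rounds a size-aligned request up to a power of two itself, so the mapping
path now rounds nrpages before allocating, keeping the reserved region as
large as the requested alignment implies. For example:

	/* a 3-page size-aligned request must reserve a 4-page region */
	nrpages = __roundup_pow_of_two(3);	/* == 4 */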
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 73c0748..7df9777 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -202,9 +202,9 @@
 
 static bool selftest_running = false;
 
-static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
-	return phys_to_dma(dev, virt_to_phys(pages));
+	return (dma_addr_t)virt_to_phys(pages);
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
@@ -223,10 +223,10 @@
 			goto out_free;
 		/*
 		 * We depend on the IOMMU being able to work with any physical
-		 * address directly, so if the DMA layer suggests it can't by
-		 * giving us back some translation, that bodes very badly...
+		 * address directly, so if the DMA layer suggests otherwise by
+		 * translating or truncating them, that bodes very badly...
 		 */
-		if (dma != __arm_lpae_dma_addr(dev, pages))
+		if (dma != virt_to_phys(pages))
 			goto out_unmap;
 	}
 
@@ -243,10 +243,8 @@
 static void __arm_lpae_free_pages(void *pages, size_t size,
 				  struct io_pgtable_cfg *cfg)
 {
-	struct device *dev = cfg->iommu_dev;
-
 	if (!selftest_running)
-		dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
 	free_pages_exact(pages, size);
 }
@@ -254,12 +252,11 @@
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 			       struct io_pgtable_cfg *cfg)
 {
-	struct device *dev = cfg->iommu_dev;
-
 	*ptep = pte;
 
 	if (!selftest_running)
-		dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+		dma_sync_single_for_device(cfg->iommu_dev,
+					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
 }
 
@@ -629,6 +626,11 @@
 	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
 		return NULL;
 
+	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
+		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
+		return NULL;
+	}
+
 	data = kmalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return NULL;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d92..fa0adef 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
  */
 
 #include <linux/iova.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
-	int ret = 0;
-
-	iommu_iova_cache = kmem_cache_create("iommu_iova",
-					 sizeof(struct iova),
-					 0,
-					 SLAB_HWCACHE_ALIGN,
-					 NULL);
-	if (!iommu_iova_cache) {
-		pr_err("Couldn't create iova cache\n");
-		ret = -ENOMEM;
-	}
-
-	return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
-	kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-	kmem_cache_free(iommu_iova_cache, iova);
-}
-
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
+EXPORT_SYMBOL_GPL(init_iova_domain);
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@
 	}
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@
 	rb_insert_color(&iova->node, root);
 }
 
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+	kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (!iova_cache_users) {
+		iova_cache = kmem_cache_create(
+			"iommu_iova", sizeof(struct iova), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!iova_cache) {
+			mutex_unlock(&iova_cache_mutex);
+			printk(KERN_ERR "Couldn't create iova cache\n");
+			return -ENOMEM;
+		}
+	}
+
+	iova_cache_users++;
+	mutex_unlock(&iova_cache_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+	mutex_lock(&iova_cache_mutex);
+	if (WARN_ON(!iova_cache_users)) {
+		mutex_unlock(&iova_cache_mutex);
+		return;
+	}
+	iova_cache_users--;
+	if (!iova_cache_users)
+		kmem_cache_destroy(iova_cache);
+	mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -265,12 +279,6 @@
 	if (!new_iova)
 		return NULL;
 
-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
 
@@ -281,6 +289,7 @@
 
 	return new_iova;
 }
+EXPORT_SYMBOL_GPL(alloc_iova);
 
 /**
  * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(find_iova);
 
 /**
  * __free_iova - frees the given iova
@@ -339,6 +349,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	free_iova_mem(iova);
 }
+EXPORT_SYMBOL_GPL(__free_iova);
 
 /**
  * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@
 		__free_iova(iovad, iova);
 
 }
+EXPORT_SYMBOL_GPL(free_iova);
 
 /**
  * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@
 	}
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(put_iova_domain);
 
 static int
 __is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@
 	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
+EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
@@ -493,6 +507,7 @@
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
 struct iova *
 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@
 		free_iova_mem(prev);
 	return NULL;
 }
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
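
Making IOMMU_IOVA tristate means the iova kmem_cache can no longer be set
up once from core init; it is created on first use and refcounted instead,
so several IOMMU drivers, including modular ones, can share it. The
intended usage, sketched for a hypothetical consumer:

	static int __init my_iommu_init(void)
	{
		int ret;

		ret = iova_cache_get();	/* first user creates the cache */
		if (ret)
			return ret;
		/* ... set up domains, call alloc_iova() etc. ... */
		return 0;
	}

	static void __exit my_iommu_exit(void)
	{
		/* ... tear down domains ... */
		iova_cache_put();	/* last user destroys the cache */
	}

Also note the new iova_get_pad_size(): the single expression
(limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1) aligns the
allocation on the power-of-two order of its size even when the size itself
is not a power of two, which is what lets alloc_iova() drop the explicit
rounding removed in the hunk above.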
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 655cb96..9d89900 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -56,9 +56,6 @@
 
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
 
-#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)
-#define ARMADA_370_XP_FABRIC_IRQ		(3)
-
 #define IPI_DOORBELL_START                      (0)
 #define IPI_DOORBELL_END                        (8)
 #define IPI_DOORBELL_MASK                       0xFF
@@ -81,13 +78,10 @@
 
 static inline bool is_percpu_irq(irq_hw_number_t irq)
 {
-	switch (irq) {
-	case ARMADA_370_XP_TIMER0_PER_CPU_IRQ:
-	case ARMADA_370_XP_FABRIC_IRQ:
+	if (irq <= ARMADA_370_XP_MAX_PER_CPU_IRQS)
 		return true;
-	default:
-		return false;
-	}
+
+	return false;
 }
 
 /*
@@ -549,7 +543,7 @@
 		if (virq == 0)
 			continue;
 
-		if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+		if (!is_percpu_irq(irq))
 			writel(irq, per_cpu_int_base +
 			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 		else
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 9da9942..f6d6804 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@
 {
 	struct irq_domain *domain = d->domain;
 	struct irq_domain_chip_generic *dgc = domain->gc;
-	struct irq_chip_generic *gc = dgc->gc[0];
+	struct irq_chip_generic *bgc = dgc->gc[0];
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
-	/* Disable interrupt on AIC5 */
-	irq_gc_lock(gc);
+	/*
+	 * Disable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
 	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
 	gc->mask_cache &= ~d->mask;
-	irq_gc_unlock(gc);
+	irq_gc_unlock(bgc);
 }
 
 static void aic5_unmask(struct irq_data *d)
 {
 	struct irq_domain *domain = d->domain;
 	struct irq_domain_chip_generic *dgc = domain->gc;
-	struct irq_chip_generic *gc = dgc->gc[0];
+	struct irq_chip_generic *bgc = dgc->gc[0];
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 
-	/* Enable interrupt on AIC5 */
-	irq_gc_lock(gc);
+	/*
+	 * Enable interrupt on AIC5. We always take the lock of the
+	 * first irq chip as all chips share the same registers.
+	 */
+	irq_gc_lock(bgc);
 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
 	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
 	gc->mask_cache |= d->mask;
-	irq_gc_unlock(gc);
+	irq_gc_unlock(bgc);
 }
 
 static int aic5_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index cf351c6..a7c8c9f 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -62,7 +62,7 @@
 
 	dev_alias->dev_id = alias;
 	if (pdev != dev_alias->pdev)
-		dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+		dev_alias->count += its_pci_msi_vec_count(pdev);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac7ae2b..25ceae9f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -719,6 +719,9 @@
 out:
 	spin_unlock(&lpi_lock);
 
+	if (!bitmap)
+		*base = *nr_ids = 0;
+
 	return bitmap;
 }
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index af2f16b..aeaa061 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -320,6 +320,14 @@
 		intrmask[i] = gic_read(intrmask_reg);
 		pending_reg += gic_reg_step;
 		intrmask_reg += gic_reg_step;
+
+		if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+			continue;
+
+		pending[i] |= (u64)gic_read(pending_reg) << 32;
+		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+		pending_reg += gic_reg_step;
+		intrmask_reg += gic_reg_step;
 	}
 
 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 
 	/* Re-route this IRQ */
-	gic_map_to_vpe(irq, cpumask_first(&tmp));
+	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
 	/* Update the pcpu_masks */
 	for (i = 0; i < NR_CPUS; i++)
@@ -599,7 +607,7 @@
 				      GIC_SHARED_TO_HWIRQ(intr));
 	int i;
 
-	gic_map_to_vpe(intr, cpu);
+	gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
 	for (i = 0; i < NR_CPUS; i++)
 		clear_bit(intr, pcpu_masks[i].pcpu_mask);
 	set_bit(intr, pcpu_masks[cpu].pcpu_mask);
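
The first mips-gic hunk handles a 64-bit kernel whose GIC registers are
only 32 bits wide (CONFIG_64BIT set, mips_cm_is64 clear): each 64-bit word
of the pending and mask bitmaps then spans two registers and must be
assembled from two reads, roughly:

	u64 word;

	word  = gic_read(reg);					/* low half */
	word |= (u64)gic_read(reg + gic_reg_step) << 32;	/* high half */

The other two hunks translate the target CPU through mips_cm_vp_id()
before writing the map registers, so the value used is the hardware VP id
rather than the Linux CPU number; the two need not coincide.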
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 18accb0..c53a53f 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,7 +1247,7 @@
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *nskb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
 	int i, hdr_space_needed;
@@ -1262,14 +1262,10 @@
 		return;
 
 	hdr_space_needed = l2headersize(l2, 0);
-	if (hdr_space_needed > skb_headroom(skb)) {
-		struct sk_buff *orig_skb = skb;
-
-		skb = skb_realloc_headroom(skb, hdr_space_needed);
-		if (!skb) {
-			dev_kfree_skb(orig_skb);
-			return;
-		}
+	nskb = skb_realloc_headroom(skb, hdr_space_needed);
+	if (!nskb) {
+		skb_queue_head(&l2->i_queue, skb);
+		return;
 	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if (test_bit(FLG_MOD128, &l2->flag))
@@ -1282,7 +1278,7 @@
 		       p1);
 		dev_kfree_skb(l2->windowar[p1]);
 	}
-	l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
+	l2->windowar[p1] = skb;
 
 	i = sethdraddr(&st->l2, header, CMD);
 
@@ -1295,8 +1291,8 @@
 		l2->vs = (l2->vs + 1) % 8;
 	}
 	spin_unlock_irqrestore(&l2->lock, flags);
-	memcpy(skb_push(skb, i), header, i);
-	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
+	memcpy(skb_push(nskb, i), header, i);
+	st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
 	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
 		FsmDelTimer(&st->l2.t203, 13);
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
index 8b1a66c..e72b4e7 100644
--- a/drivers/isdn/mISDN/dsp_pipeline.c
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -235,7 +235,7 @@
 
 int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
 {
-	int len, incomplete = 0, found = 0;
+	int incomplete = 0, found = 0;
 	char *dup, *tok, *name, *args;
 	struct dsp_element_entry *entry, *n;
 	struct dsp_pipeline_entry *pipeline_entry;
@@ -247,17 +247,9 @@
 	if (!list_empty(&pipeline->list))
 		_dsp_pipeline_destroy(pipeline);
 
-	if (!cfg)
-		return 0;
-
-	len = strlen(cfg);
-	if (!len)
-		return 0;
-
-	dup = kmalloc(len + 1, GFP_ATOMIC);
+	dup = kstrdup(cfg, GFP_ATOMIC);
 	if (!dup)
 		return 0;
-	strcpy(dup, cfg);
 	while ((tok = strsep(&dup, "|"))) {
 		if (!strlen(tok))
 			continue;
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 949cabb..5eb380a 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -1476,7 +1476,7 @@
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct layer2	*l2 = fi->userdata;
-	struct sk_buff	*skb, *nskb, *oskb;
+	struct sk_buff	*skb, *nskb;
 	u_char		header[MAX_L2HEADER_LEN];
 	u_int		i, p1;
 
@@ -1486,11 +1486,26 @@
 	skb = skb_dequeue(&l2->i_queue);
 	if (!skb)
 		return;
-
-	if (test_bit(FLG_MOD128, &l2->flag))
+	i = sethdraddr(l2, header, CMD);
+	if (test_bit(FLG_MOD128, &l2->flag)) {
+		header[i++] = l2->vs << 1;
+		header[i++] = l2->vr << 1;
+	} else
+		header[i++] = (l2->vr << 5) | (l2->vs << 1);
+	nskb = skb_realloc_headroom(skb, i);
+	if (!nskb) {
+		printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
+		       mISDNDevName4ch(&l2->ch), i);
+		skb_queue_head(&l2->i_queue, skb);
+		return;
+	}
+	if (test_bit(FLG_MOD128, &l2->flag)) {
 		p1 = (l2->vs - l2->va) % 128;
-	else
+		l2->vs = (l2->vs + 1) % 128;
+	} else {
 		p1 = (l2->vs - l2->va) % 8;
+		l2->vs = (l2->vs + 1) % 8;
+	}
 	p1 = (p1 + l2->sow) % l2->window;
 	if (l2->windowar[p1]) {
 		printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
@@ -1498,36 +1513,7 @@
 		dev_kfree_skb(l2->windowar[p1]);
 	}
 	l2->windowar[p1] = skb;
-	i = sethdraddr(l2, header, CMD);
-	if (test_bit(FLG_MOD128, &l2->flag)) {
-		header[i++] = l2->vs << 1;
-		header[i++] = l2->vr << 1;
-		l2->vs = (l2->vs + 1) % 128;
-	} else {
-		header[i++] = (l2->vr << 5) | (l2->vs << 1);
-		l2->vs = (l2->vs + 1) % 8;
-	}
-
-	nskb = skb_clone(skb, GFP_ATOMIC);
-	p1 = skb_headroom(nskb);
-	if (p1 >= i)
-		memcpy(skb_push(nskb, i), header, i);
-	else {
-		printk(KERN_WARNING
-		       "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
-		       mISDNDevName4ch(&l2->ch), i, p1);
-		oskb = nskb;
-		nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
-		if (!nskb) {
-			dev_kfree_skb(oskb);
-			printk(KERN_WARNING "%s: no skb mem in %s\n",
-			       mISDNDevName4ch(&l2->ch), __func__);
-			return;
-		}
-		memcpy(skb_put(nskb, i), header, i);
-		memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
-		dev_kfree_skb(oskb);
-	}
+	memcpy(skb_push(nskb, i), header, i);
 	l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
 	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
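
Both L2 rewrites (the hisax one above and this mISDN one) converge on the
same safer pattern: reallocate headroom into a fresh skb up front, and on
allocation failure requeue the original instead of dropping it. The
untouched original, not a clone, is what stays in the retransmission
window. The core of the pattern:

	nskb = skb_realloc_headroom(skb, hdr_len);	/* returns a new skb */
	if (!nskb) {
		skb_queue_head(&l2->i_queue, skb);	/* retry later, nothing lost */
		return;
	}
	l2->windowar[p1] = skb;		/* original kept for retransmission */
	memcpy(skb_push(nskb, hdr_len), header, hdr_len);

skb_realloc_headroom() always hands back a separate skb and leaves the
original untouched, which is what makes the requeue on failure safe.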
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index de36237..0516454 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -74,7 +74,7 @@
 		ret = -ENOTSUPP;
 		dev_err(&pdev->dev,
 			"IO mapped PCI devices are not supported\n");
-		goto out_release;
+		goto out_iounmap;
 	}
 
 	pci_set_drvdata(pdev, priv);
@@ -89,7 +89,7 @@
 
 	ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
 	if (ret < 0)
-		goto out_iounmap;
+		goto out_mcb_bus;
 	num_cells = ret;
 
 	dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
@@ -98,6 +98,8 @@
 
 	return 0;
 
+out_mcb_bus:
+	mcb_release_bus(priv->bus);
 out_iounmap:
 	iounmap(priv->base);
 out_release:
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52..48b5890 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@
 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
 		ret = bitmap_storage_alloc(&store, chunks,
 					   !bitmap->mddev->bitmap_info.external,
-					   bitmap->cluster_slot);
+					   mddev_is_clustered(bitmap->mddev)
+					   ? bitmap->cluster_slot : 0);
 	if (ret)
 		goto err;
 
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 240c9f0..8a09645 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -436,7 +436,7 @@
 static struct dm_cache_policy_type wb_policy_type = {
 	.name = "cleaner",
 	.version = {1, 0, 0},
-	.hint_size = 0,
+	.hint_size = 4,
 	.owner = THIS_MODULE,
 	.create = wb_create
 };
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index ebaa4f8..192bb8b 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -203,7 +203,7 @@
 		return -EINVAL;
 	}
 
-	tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+	tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
 	if (!tmp_store) {
 		ti->error = "Exception store allocation failed";
 		return -ENOMEM;
@@ -215,7 +215,7 @@
 	else if (persistent == 'N')
 		type = get_type("N");
 	else {
-		ti->error = "Persistent flag is not P or N";
+		ti->error = "Exception store type is not P or N";
 		r = -EINVAL;
 		goto bad_type;
 	}
@@ -233,7 +233,7 @@
 	if (r)
 		goto bad;
 
-	r = type->ctr(tmp_store, 0, NULL);
+	r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
 	if (r) {
 		ti->error = "Exception store type constructor failed";
 		goto bad;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 0b25362..fae34e7 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -42,8 +42,7 @@
 	const char *name;
 	struct module *module;
 
-	int (*ctr) (struct dm_exception_store *store,
-		    unsigned argc, char **argv);
+	int (*ctr) (struct dm_exception_store *store, char *options);
 
 	/*
 	 * Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@
 	unsigned chunk_shift;
 
 	void *context;
+
+	bool userspace_supports_overflow;
 };
 
 /*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 97e1651..a090121 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -329,8 +329,7 @@
 		 */
 		if (min_region_size > (1 << 13)) {
 			/* If not a power of 2, make it the next power of 2 */
-			if (min_region_size & (min_region_size - 1))
-				region_size = 1 << fls(region_size);
+			region_size = roundup_pow_of_two(min_region_size);
 			DMINFO("Choosing default region size of %lu sectors",
 			       region_size);
 		} else {
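
This branch runs only when the caller supplied no region size, so
region_size is still zero at this point; the old code therefore computed
1 << fls(0) == 1 (or left region_size at 0 when min_region_size was
already a power of two) instead of rounding min_region_size up. Using
roundup_pow_of_two() on the right variable gives the intended result:

	/* roundup_pow_of_two(8192) == 8192  (already a power of two) */
	/* roundup_pow_of_two(8193) == 16384                          */
	/* old code: 1 << fls(region_size) with region_size == 0 -> 1 */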
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index bf71583..117a05e 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -7,6 +7,7 @@
 
 #include "dm-exception-store.h"
 
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/vmalloc.h>
@@ -843,10 +844,10 @@
 		DMWARN("write header failed");
 }
 
-static int persistent_ctr(struct dm_exception_store *store,
-			  unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
 {
 	struct pstore *ps;
+	int r;
 
 	/* allocate the pstore */
 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
@@ -868,14 +869,32 @@
 
 	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
 	if (!ps->metadata_wq) {
-		kfree(ps);
 		DMERR("couldn't start header metadata update thread");
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto err_workqueue;
+	}
+
+	if (options) {
+		char overflow = toupper(options[0]);
+		if (overflow == 'O')
+			store->userspace_supports_overflow = true;
+		else {
+			DMERR("Unsupported persistent store option: %s", options);
+			r = -EINVAL;
+			goto err_options;
+		}
 	}
 
 	store->context = ps;
 
 	return 0;
+
+err_options:
+	destroy_workqueue(ps->metadata_wq);
+err_workqueue:
+	kfree(ps);
+
+	return r;
 }
 
 static unsigned persistent_status(struct dm_exception_store *store,
@@ -888,7 +907,8 @@
 	case STATUSTYPE_INFO:
 		break;
 	case STATUSTYPE_TABLE:
-		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+		       (unsigned long long)store->chunk_size);
 	}
 
 	return sz;
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 1ce9a25..9b7c8c8 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -70,8 +70,7 @@
 	*metadata_sectors = 0;
 }
 
-static int transient_ctr(struct dm_exception_store *store,
-			 unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
 {
 	struct transient_c *tc;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0bcd65..c06b74e 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1098,7 +1098,7 @@
 }
 
 /*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
  */
 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
@@ -1302,6 +1302,7 @@
 
 	u.store_swap = snap_dest->store;
 	snap_dest->store = snap_src->store;
+	snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
 	snap_src->store = u.store_swap;
 
 	snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@
 
 			pe = __find_pending_exception(s, pe, chunk);
 			if (!pe) {
-				s->snapshot_overflowed = 1;
-				DMERR("Snapshot overflowed: Unable to allocate exception.");
+				if (s->store->userspace_supports_overflow) {
+					s->snapshot_overflowed = 1;
+					DMERR("Snapshot overflowed: Unable to allocate exception.");
+				} else
+					__invalidate_snapshot(s, -ENOMEM);
 				r = -EIO;
 				goto out_unlock;
 			}
@@ -2365,7 +2369,7 @@
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 14, 0},
+	.version = {1, 15, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -2379,7 +2383,7 @@
 
 static struct target_type merge_target = {
 	.name    = dm_snapshot_merge_target_name,
-	.version = {1, 3, 0},
+	.version = {1, 4, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6fcbfb0..3897b90 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3201,7 +3201,7 @@
 						metadata_low_callback,
 						pool);
 	if (r)
-		goto out_free_pt;
+		goto out_flags_changed;
 
 	pt->callbacks.congested_fn = pool_is_congested;
 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6264781..1b5c604 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1001,6 +1001,7 @@
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
 	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+	int error = clone->bi_error;
 
 	bio_put(clone);
 
@@ -1011,13 +1012,13 @@
 		 * the remainder.
 		 */
 		return;
-	else if (bio->bi_error) {
+	else if (error) {
 		/*
 		 * Don't notice the error to the upper layer yet.
 		 * The error handling decision is made by the target driver,
 		 * when the request is completed.
 		 */
-		tio->error = bio->bi_error;
+		tio->error = error;
 		return;
 	}
 
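The bi_error fix is a use-after-free in miniature: bio_put() may drop the
last reference to the clone, so anything still needed from it must be read
first. The ordering that matters:

	int error = clone->bi_error;	/* read while the clone is alive */

	bio_put(clone);			/* may free the clone */
	if (error)
		tio->error = error;	/* safe: uses the saved copy */

The teardown hunk below is related only in spirit: it takes the live table
reference under suspend_lock and drops it before releasing the lock, so
__dm_destroy() cannot race with an internal suspend.
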
@@ -2837,8 +2838,6 @@
 
 	might_sleep();
 
-	map = dm_get_live_table(md, &srcu_idx);
-
 	spin_lock(&_minor_lock);
 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
 	set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@
 	 * do not race with internal suspend.
 	 */
 	mutex_lock(&md->suspend_lock);
+	map = dm_get_live_table(md, &srcu_idx);
 	if (!dm_suspended_md(md)) {
 		dm_table_presuspend_targets(map);
 		dm_table_postsuspend_targets(map);
 	}
-	mutex_unlock(&md->suspend_lock);
-
 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
 	dm_put_live_table(md, srcu_idx);
+	mutex_unlock(&md->suspend_lock);
 
 	/*
 	 * Rare, but there may be I/O requests still going to complete,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe..c702de1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@
 		 * which will now never happen */
 		wake_up_process(mddev->sync_thread->tsk);
 
+	if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+		return -EBUSY;
 	mddev_unlock(mddev);
 	wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
 					  &mddev->recovery));
+	wait_event(mddev->sb_wait,
+		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
 	mddev_lock_nointr(mddev);
 
 	mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@
 			md_reap_sync_thread(mddev);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
 			goto unlock;
 		}
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522..d132f06 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@
 	return 0;
 
 out_free_conf:
-	if (conf->pool)
-		mempool_destroy(conf->pool);
+	mempool_destroy(conf->pool);
 	kfree(conf->multipaths);
 	kfree(conf);
 	mddev->private = NULL;
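
The dropped NULL check is safe because mempool_destroy(), like
kmem_cache_destroy(), now tolerates a NULL pointer; the same
simplification repeats in the raid1, raid10 and raid5 hunks below.

	/* before */
	if (conf->pool)
		mempool_destroy(conf->pool);

	/* after: the destructor itself ignores a NULL pool */
	mempool_destroy(conf->pool);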
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b..f8e5db0 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@
 		struct md_rdev *rdev;
 		bool discard_supported = false;
 
-		rdev_for_each(rdev, mddev) {
-			disk_stack_limits(mddev->gendisk, rdev->bdev,
-					  rdev->data_offset << 9);
-			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-				discard_supported = true;
-		}
 		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
 		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@
 		blk_queue_io_opt(mddev->queue,
 				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+		rdev_for_each(rdev, mddev) {
+			disk_stack_limits(mddev->gendisk, rdev->bdev,
+					  rdev->data_offset << 9);
+			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+				discard_supported = true;
+		}
 		if (!discard_supported)
 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06..ddd8a5f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@
 	}
 
 	if (bio && bio_data_dir(bio) == WRITE) {
-		if (bio->bi_iter.bi_sector >=
-		    conf->mddev->curr_resync_completed) {
+		if (bio->bi_iter.bi_sector >= conf->next_resync) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
@@ -1516,7 +1515,7 @@
 	conf->r1buf_pool = NULL;
 
 	spin_lock_irq(&conf->resync_lock);
-	conf->next_resync = 0;
+	conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
 	conf->start_next_window = MaxSector;
 	conf->current_window_requests +=
 		conf->next_window_requests;
@@ -2383,8 +2382,8 @@
 		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		while (!list_empty(&tmp)) {
-			r1_bio = list_first_entry(&conf->bio_end_io_list,
-						  struct r1bio, retry_list);
+			r1_bio = list_first_entry(&tmp, struct r1bio,
+						  retry_list);
 			list_del(&r1_bio->retry_list);
 			raid_end_bio_io(r1_bio);
 		}
@@ -2843,8 +2842,7 @@
 
  abort:
 	if (conf) {
-		if (conf->r1bio_pool)
-			mempool_destroy(conf->r1bio_pool);
+		mempool_destroy(conf->r1bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@
 {
 	struct r1conf *conf = priv;
 
-	if (conf->r1bio_pool)
-		mempool_destroy(conf->r1bio_pool);
+	mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
 	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
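
The retry-list fix: the entries were spliced from conf->bio_end_io_list
onto a private tmp list under device_lock, but the drain loop then took
its entries from the original, now empty, list head, turning the head
itself into a bogus r1bio via container_of. Draining has to use the list
that actually holds the entries; a sketch of the intended splice-and-drain
shape (the splice itself sits just outside the hunk shown):

	LIST_HEAD(tmp);

	spin_lock_irqsave(&conf->device_lock, flags);
	list_splice_init(&conf->bio_end_io_list, &tmp);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	while (!list_empty(&tmp)) {
		r1_bio = list_first_entry(&tmp, struct r1bio, retry_list);
		list_del(&r1_bio->retry_list);
		raid_end_bio_io(r1_bio);
	}

The raid10 hunk below repeats the identical fix for r10bio.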
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb..9f69dc5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2688,8 +2688,8 @@
 		}
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		while (!list_empty(&tmp)) {
-			r10_bio = list_first_entry(&conf->bio_end_io_list,
-						  struct r10bio, retry_list);
+			r10_bio = list_first_entry(&tmp, struct r10bio,
+						   retry_list);
 			list_del(&r10_bio->retry_list);
 			raid_end_bio_io(r10_bio);
 		}
@@ -3486,8 +3486,7 @@
 		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
 		       mdname(mddev));
 	if (conf) {
-		if (conf->r10bio_pool)
-			mempool_destroy(conf->r10bio_pool);
+		mempool_destroy(conf->r10bio_pool);
 		kfree(conf->mirrors);
 		safe_put_page(conf->tmppage);
 		kfree(conf);
@@ -3682,8 +3681,7 @@
 
 out_free_conf:
 	md_unregister_thread(&mddev->thread);
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf);
@@ -3696,8 +3694,7 @@
 {
 	struct r10conf *conf = priv;
 
-	if (conf->r10bio_pool)
-		mempool_destroy(conf->r10bio_pool);
+	mempool_destroy(conf->r10bio_pool);
 	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c6..49bb8d3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@
 	       drop_one_stripe(conf))
 		;
 
-	if (conf->slab_cache)
-		kmem_cache_destroy(conf->slab_cache);
+	kmem_cache_destroy(conf->slab_cache);
 	conf->slab_cache = NULL;
 }
 
@@ -3150,6 +3149,8 @@
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
+			if (bi)
+				s->to_read--;
 			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
@@ -3169,6 +3170,8 @@
 		 */
 		clear_bit(R5_LOCKED, &sh->dev[i].flags);
 	}
+	s->to_write = 0;
+	s->written = 0;
 
 	if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
 		if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@
 		 */
 		return 0;
 
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (fdev[i]->towrite &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
 		    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@
 	    sh->sector < sh->raid_conf->mddev->recovery_cp)
 		/* reconstruct-write isn't being forced */
 		return 0;
-	for (i = 0; i < s->failed; i++) {
+	for (i = 0; i < s->failed && i < 2; i++) {
 		if (s->failed_num[i] != sh->pd_idx &&
 		    s->failed_num[i] != sh->qd_idx &&
 		    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index f28cb28..2c7f8d7 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -42,6 +42,8 @@
 	.thaw = intel_lpss_resume,		\
 	.poweroff = intel_lpss_suspend,		\
 	.restore = intel_lpss_resume,
+#else
+#define INTEL_LPSS_SLEEP_PM_OPS
 #endif
 
 #define INTEL_LPSS_RUNTIME_PM_OPS		\
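
Without an #else branch, a !CONFIG_PM_SLEEP build hits an undefined macro
wherever INTEL_LPSS_SLEEP_PM_OPS appears in a dev_pm_ops initializer.
Defining it empty is the usual shape for conditional PM ops; a sketch with
hypothetical my_* names:

	#ifdef CONFIG_PM_SLEEP
	#define MY_SLEEP_PM_OPS			\
		.suspend = my_suspend,		\
		.resume  = my_resume,
	#else
	#define MY_SLEEP_PM_OPS
	#endif

	static const struct dev_pm_ops my_pm_ops = {
		MY_SLEEP_PM_OPS		/* expands to nothing without PM_SLEEP */
		.runtime_suspend = my_rt_suspend,
	};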
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index c52162e..586098f 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -80,7 +80,7 @@
 	if (!max77843->i2c_chg) {
 		dev_err(&max77843->i2c->dev,
 				"Cannot allocate I2C device for Charger\n");
-		return PTR_ERR(max77843->i2c_chg);
+		return -ENODEV;
 	}
 	i2c_set_clientdata(max77843->i2c_chg, max77843);
 
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 8af12c8..103baf0 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -105,6 +105,7 @@
 
 void cxl_free_afu_irqs(struct cxl_context *ctx)
 {
+	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 }
 EXPORT_SYMBOL_GPL(cxl_free_afu_irqs);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index e762f85..2faa127 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -275,6 +275,9 @@
 	if (ctx->kernelapi)
 		kfree(ctx->mapping);
 
+	if (ctx->irq_bitmap)
+		kfree(ctx->irq_bitmap);
+
 	kfree(ctx);
 }
 
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 1c30ef7..0cfb9c1 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -677,6 +677,7 @@
 void cxl_release_serr_irq(struct cxl_afu *afu);
 int afu_register_irqs(struct cxl_context *ctx, u32 count);
 void afu_release_irqs(struct cxl_context *ctx, void *cookie);
+void afu_irq_name_free(struct cxl_context *ctx);
 irqreturn_t cxl_slice_irq_err(int irq, void *data);
 
 int cxl_debugfs_init(void);
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index a30bf28..7ccd299 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -120,9 +120,16 @@
 		 __func__, ctx->pe);
 	cxl_context_detach(ctx);
 
-	mutex_lock(&ctx->mapping_lock);
-	ctx->mapping = NULL;
-	mutex_unlock(&ctx->mapping_lock);
+
+	/*
+	 * Delete the context's mapping pointer, unless it was created by the
+	 * kernel API, in which case leave it so it can be freed by reclaim_ctx().
+	 */
+	if (!ctx->kernelapi) {
+		mutex_lock(&ctx->mapping_lock);
+		ctx->mapping = NULL;
+		mutex_unlock(&ctx->mapping_lock);
+	}
 
 	put_device(&ctx->afu->dev);
 
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 583b42a..09a4060 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -414,7 +414,7 @@
 	kfree(afu->psl_irq_name);
 }
 
-static void afu_irq_name_free(struct cxl_context *ctx)
+void afu_irq_name_free(struct cxl_context *ctx)
 {
 	struct cxl_irq_name *irq_name, *tmp;
 
@@ -524,7 +524,5 @@
 	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 
-	kfree(ctx->irq_bitmap);
-	ctx->irq_bitmap = NULL;
 	ctx->irq_count = 0;
 }
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index b37f2e8..d2e75c8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -457,6 +457,7 @@
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
+	afu->num_procs = afu->max_procs_virtualised;
 	if (afu->spa == NULL) {
 		if (cxl_alloc_spa(afu))
 			return -ENOMEM;
@@ -468,7 +469,6 @@
 	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
 
 	afu->current_mode = CXL_MODE_DIRECTED;
-	afu->num_procs = afu->max_procs_virtualised;
 
 	if ((rc = cxl_chardev_m_afu_add(afu)))
 		return rc;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index a5e9771..85761d7 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1035,6 +1035,32 @@
 	return 0;
 }
 
+/*
+ * Work around a PCIe Host Bridge defect on some cards that can cause
+ * malformed Transaction Layer Packet (TLP) errors to be erroneously
+ * reported. Mask this error in the Uncorrectable Error Mask Register.
+ *
+ * The upper nibble of the PSL revision is used to distinguish between
+ * different cards. The affected ones have it set to 0.
+ */
+static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
+{
+	int aer;
+	u32 data;
+
+	if (adapter->psl_rev & 0xf000)
+		return;
+	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
+		return;
+	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
+	if (data & PCI_ERR_UNC_MALF_TLP)
+		if (data & PCI_ERR_UNC_INTN)
+			return;
+	data |= PCI_ERR_UNC_MALF_TLP;
+	data |= PCI_ERR_UNC_INTN;
+	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
+}
+
 static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
 {
 	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
@@ -1134,6 +1160,8 @@
 	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
 	        return rc;
 
+	cxl_fixup_malformed_tlp(adapter, dev);
+
 	if ((rc = setup_cxl_bars(dev)))
 		return rc;
 
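The fixup reads the AER Uncorrectable Error Mask Register and sets the Malformed TLP and Internal Error bits unless both are already masked. The same bit logic in runnable C, using the bit values from include/uapi/linux/pci_regs.h:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_ERR_UNC_MALF_TLP    0x00040000  /* Malformed TLP */
    #define PCI_ERR_UNC_INTN        0x00400000  /* internal error */

    static uint32_t mask_malformed_tlp(uint32_t mask)
    {
        /* Both already masked: nothing to write back */
        if ((mask & PCI_ERR_UNC_MALF_TLP) && (mask & PCI_ERR_UNC_INTN))
            return mask;

        return mask | PCI_ERR_UNC_MALF_TLP | PCI_ERR_UNC_INTN;
    }

    int main(void)
    {
        printf("%08x\n", mask_malformed_tlp(0));            /* 00440000 */
        printf("%08x\n", mask_malformed_tlp(0x00440000));   /* unchanged */
        return 0;
    }
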
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 4b469cf..8504dbe 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -204,6 +204,8 @@
 	if (!dir)
 		return -ENOMEM;
 
+	dev->dbgfs_dir = dir;
+
 	f = debugfs_create_file("meclients", S_IRUSR, dir,
 				dev, &mei_dbgfs_fops_meclients);
 	if (!f) {
@@ -228,7 +230,6 @@
 		dev_err(dev->dev, "allow_fixed_address: registration failed\n");
 		goto err;
 	}
-	dev->dbgfs_dir = dir;
 	return 0;
 err:
 	mei_dbgfs_deregister(dev);
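The point of moving the dev->dbgfs_dir assignment up is that the shared err: path calls mei_dbgfs_deregister(), which can only remove the half-built directory if the handle was recorded before the first file creation could fail. A small model of that ordering, with malloc()/free() standing in for the debugfs calls:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev { void *dbgfs_dir; };

    static void dbgfs_deregister(struct dev *d)
    {
        free(d->dbgfs_dir);     /* stands in for debugfs_remove_recursive() */
        d->dbgfs_dir = NULL;
    }

    static int dbgfs_register(struct dev *d, int fail_midway)
    {
        void *dir = malloc(1);  /* stands in for debugfs_create_dir() */

        if (!dir)
            return -1;

        d->dbgfs_dir = dir;     /* record first, as in the hunk above */

        if (fail_midway) {      /* a later file creation failed */
            dbgfs_deregister(d);    /* sees a valid dbgfs_dir */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct dev d = { 0 };

        printf("ret=%d dir=%p\n", dbgfs_register(&d, 1), d.dbgfs_dir);
        return 0;
    }
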
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8eec887..6d7c188 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1209,7 +1209,7 @@
 		 * after the host receives the enum_resp
 		 * message clients may be added or removed
 		 */
-		if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+		if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
 		    dev->hbm_state >= MEI_HBM_STOPPED) {
 			dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
 				dev->dev_state, dev->hbm_state);
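The operator change fixes a guard that could never fire: assuming the usual ordering of HBM states (MEI_HBM_ENUM_CLIENTS below MEI_HBM_STOPPED), no state satisfies both halves of the old '&&' test at once. A quick truth-table check with illustrative enum values:

    #include <stdbool.h>
    #include <stdio.h>

    enum { MEI_HBM_ENUM_CLIENTS = 3, MEI_HBM_STOPPED = 6 }; /* illustrative */

    int main(void)
    {
        for (int s = 0; s <= 7; s++) {
            bool old_guard = s <= MEI_HBM_ENUM_CLIENTS && s >= MEI_HBM_STOPPED;
            bool new_guard = s <= MEI_HBM_ENUM_CLIENTS || s >= MEI_HBM_STOPPED;

            printf("state %d: old=%d new=%d\n", s, old_guard, new_guard);
        }
        return 0;   /* old is 0 for every state; new rejects both ends */
    }
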
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064..a3eb20b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@
 	int err = cmd->error;
 
 	/* Flag re-tuning needed on CRC errors */
-	if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
 	    (mrq->data && mrq->data->error == -EILSEQ) ||
-	    (mrq->stop && mrq->stop->error == -EILSEQ))
+	    (mrq->stop && mrq->stop->error == -EILSEQ)))
 		mmc_retune_needed(host);
 
 	if (err && cmd->retries && mmc_host_is_spi(host)) {
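The widened condition stops requesting re-tuning when the failing command is itself a tuning command (CMD19 or CMD21), since CRC errors are an expected part of sampling tuning blocks. One way to keep the guard readable is a small predicate; this helper is a sketch, not an mmc core API:

    #include <stdbool.h>
    #include <stdint.h>

    #define MMC_SEND_TUNING_BLOCK        19  /* CMD19: SD and eMMC HS */
    #define MMC_SEND_TUNING_BLOCK_HS200  21  /* CMD21: eMMC HS200 */

    static bool mmc_op_is_tuning(uint32_t opcode)
    {
        return opcode == MMC_SEND_TUNING_BLOCK ||
               opcode == MMC_SEND_TUNING_BLOCK_HS200;
    }

With it, the guard above would read: if (!mmc_op_is_tuning(cmd->opcode) && (err == -EILSEQ || ...)) mmc_retune_needed(host);
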
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b..5466f25 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@
 					   0, &cd_gpio_invert);
 		if (!ret)
 			dev_info(host->parent, "Got CD GPIO\n");
-		else if (ret != -ENOENT)
+		else if (ret != -ENOENT && ret != -ENOSYS)
 			return ret;
 
 		/*
@@ -481,7 +481,7 @@
 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
 	if (!ret)
 		dev_info(host->parent, "Got WP GPIO\n");
-	else if (ret != -ENOENT)
+	else if (ret != -ENOENT && ret != -ENOSYS)
 		return ret;
 
 	if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 781e4db..7fb0753 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -182,6 +182,7 @@
 	struct	clk		*fclk;
 	struct	clk		*dbclk;
 	struct	regulator	*pbias;
+	bool			pbias_enabled;
 	void	__iomem		*base;
 	int			vqmmc_enabled;
 	resource_size_t		mapbase;
@@ -328,20 +329,22 @@
 			return ret;
 		}
 
-		if (!regulator_is_enabled(host->pbias)) {
+		if (host->pbias_enabled == 0) {
 			ret = regulator_enable(host->pbias);
 			if (ret) {
 				dev_err(host->dev, "pbias reg enable fail\n");
 				return ret;
 			}
+			host->pbias_enabled = 1;
 		}
 	} else {
-		if (regulator_is_enabled(host->pbias)) {
+		if (host->pbias_enabled == 1) {
 			ret = regulator_disable(host->pbias);
 			if (ret) {
 				dev_err(host->dev, "pbias reg disable fail\n");
 				return ret;
 			}
+			host->pbias_enabled = 0;
 		}
 	}
 
@@ -475,7 +478,7 @@
 	mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
 	if (IS_ERR(mmc->supply.vmmc)) {
 		ret = PTR_ERR(mmc->supply.vmmc);
-		if (ret != -ENODEV)
+		if ((ret != -ENODEV) && host->dev->of_node)
 			return ret;
 		dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
 			PTR_ERR(mmc->supply.vmmc));
@@ -490,7 +493,7 @@
 	mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
 	if (IS_ERR(mmc->supply.vqmmc)) {
 		ret = PTR_ERR(mmc->supply.vqmmc);
-		if (ret != -ENODEV)
+		if ((ret != -ENODEV) && host->dev->of_node)
 			return ret;
 		dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
 			PTR_ERR(mmc->supply.vqmmc));
@@ -500,7 +503,7 @@
 	host->pbias = devm_regulator_get_optional(host->dev, "pbias");
 	if (IS_ERR(host->pbias)) {
 		ret = PTR_ERR(host->pbias);
-		if (ret != -ENODEV)
+		if ((ret != -ENODEV) && host->dev->of_node)
 			return ret;
 		dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
 			PTR_ERR(host->pbias));
@@ -2053,6 +2056,7 @@
 	host->base	= base + pdata->reg_offset;
 	host->power_mode = MMC_POWER_OFF;
 	host->next_data.cookie = 1;
+	host->pbias_enabled = 0;
 	host->vqmmc_enabled = 0;
 
 	ret = omap_hsmmc_gpio_init(mmc, host, pdata);
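Tracking pbias state in a driver-local flag avoids regulator_is_enabled(), which reports the regulator's aggregate state across all consumers rather than whether this driver enabled it, so relying on it can unbalance the enable/disable counts. The bookkeeping pattern, reduced to a runnable sketch with stand-in regulator calls:

    #include <stdbool.h>
    #include <stdio.h>

    struct host { bool pbias_enabled; };

    static int reg_enable(void)  { return 0; } /* regulator_enable() stand-in */
    static int reg_disable(void) { return 0; } /* regulator_disable() stand-in */

    static int pbias_set(struct host *host, bool on)
    {
        int ret;

        if (on == host->pbias_enabled)
            return 0;       /* keep enables and disables strictly paired */

        ret = on ? reg_enable() : reg_disable();
        if (ret)
            return ret;

        host->pbias_enabled = on;
        return 0;
    }

    int main(void)
    {
        struct host h = { .pbias_enabled = false };

        pbias_set(&h, true);
        pbias_set(&h, true);    /* no-op: already enabled */
        pbias_set(&h, false);
        printf("enabled=%d\n", h.pbias_enabled);
        return 0;
    }
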
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29..8cadd74 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
 #include <linux/io.h>
 #include <linux/regulator/consumer.h>
 #include <linux/gpio.h>
@@ -454,12 +455,8 @@
 {
 	struct pxamci_host *host = mmc_priv(mmc);
 
-	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
-		if (host->pdata->gpio_card_ro_invert)
-			return !gpio_get_value(host->pdata->gpio_card_ro);
-		else
-			return gpio_get_value(host->pdata->gpio_card_ro);
-	}
+	if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+		return mmc_gpio_get_ro(mmc);
 	if (host->pdata && host->pdata->get_ro)
 		return !!host->pdata->get_ro(mmc_dev(mmc));
 	/*
@@ -551,6 +548,7 @@
 
 static const struct mmc_host_ops pxamci_ops = {
 	.request		= pxamci_request,
+	.get_cd			= mmc_gpio_get_cd,
 	.get_ro			= pxamci_get_ro,
 	.set_ios		= pxamci_set_ios,
 	.enable_sdio_irq	= pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@
 		gpio_power = host->pdata->gpio_power;
 	}
 	if (gpio_is_valid(gpio_power)) {
-		ret = gpio_request(gpio_power, "mmc card power");
+		ret = devm_gpio_request(&pdev->dev, gpio_power,
+					"mmc card power");
 		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+			dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+				gpio_power);
 			goto out;
 		}
 		gpio_direction_output(gpio_power,
 				      host->pdata->gpio_power_invert);
 	}
-	if (gpio_is_valid(gpio_ro)) {
-		ret = gpio_request(gpio_ro, "mmc card read only");
-		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-			goto err_gpio_ro;
-		}
-		gpio_direction_input(gpio_ro);
+	if (gpio_is_valid(gpio_ro))
+		ret = mmc_gpio_request_ro(mmc, gpio_ro);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+		goto out;
+	} else {
+		mmc->caps |= host->pdata->gpio_card_ro_invert ?
+			MMC_CAP2_RO_ACTIVE_HIGH : 0;
 	}
-	if (gpio_is_valid(gpio_cd)) {
-		ret = gpio_request(gpio_cd, "mmc card detect");
-		if (ret) {
-			dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
-			goto err_gpio_cd;
-		}
-		gpio_direction_input(gpio_cd);
 
-		ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
-				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				  "mmc card detect", mmc);
-		if (ret) {
-			dev_err(&pdev->dev, "failed to request card detect IRQ\n");
-			goto err_request_irq;
-		}
+	if (gpio_is_valid(gpio_cd))
+		ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+		goto out;
 	}
 
 	if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@
 
 	return 0;
 
-err_request_irq:
-	gpio_free(gpio_cd);
-err_gpio_cd:
-	gpio_free(gpio_ro);
-err_gpio_ro:
-	gpio_free(gpio_power);
- out:
+out:
 	if (host) {
 		if (host->dma_chan_rx)
 			dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@
 			gpio_ro = host->pdata->gpio_card_ro;
 			gpio_power = host->pdata->gpio_power;
 		}
-		if (gpio_is_valid(gpio_cd)) {
-			free_irq(gpio_to_irq(gpio_cd), mmc);
-			gpio_free(gpio_cd);
-		}
-		if (gpio_is_valid(gpio_ro))
-			gpio_free(gpio_ro);
-		if (gpio_is_valid(gpio_power))
-			gpio_free(gpio_power);
 		if (host->vcc)
 			regulator_put(host->vcc);
 
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d155664..a0f05de 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -43,6 +43,7 @@
 
 static const struct sdhci_pltfm_data soc_data_sama5d2 = {
 	.ops = &sdhci_at91_sama5d2_ops,
+	.quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
 };
 
 static const struct of_device_id sdhci_at91_dt_match[] = {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 946d37f..f5edf9d 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -135,6 +135,7 @@
 	struct sdhci_pxa *pxa = pltfm_host->priv;
 	struct resource *res;
 
+	host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
 	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   "conf-sdio3");
@@ -290,6 +291,9 @@
 		    uhs == MMC_TIMING_UHS_DDR50) {
 			reg_val &= ~SDIO3_CONF_CLK_INV;
 			reg_val |= SDIO3_CONF_SD_FB_CLK;
+		} else if (uhs == MMC_TIMING_MMC_HS) {
+			reg_val &= ~SDIO3_CONF_CLK_INV;
+			reg_val &= ~SDIO3_CONF_SD_FB_CLK;
 		} else {
 			reg_val |= SDIO3_CONF_CLK_INV;
 			reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@
 	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
 		ret = armada_38x_quirks(pdev, host);
 		if (ret < 0)
-			goto err_clk_get;
+			goto err_mbus_win;
 		ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
 		if (ret < 0)
 			goto err_mbus_win;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 64b7fdb..fbc7efd 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1160,6 +1160,8 @@
 	host->mmc->actual_clock = 0;
 
 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
+		mdelay(1);
 
 	if (clock == 0)
 		return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7c02ff4..9d4aa31 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -412,6 +412,11 @@
 #define SDHCI_QUIRK2_ACMD23_BROKEN			(1<<14)
 /* Broken Clock divider zero in controller */
 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN		(1<<15)
+/*
+ * When internal clock is disabled, a delay is needed before modifying the
+ * SD clock frequency or enabling back the internal clock.
+ */
+#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST	(1<<16)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a67..b981b85 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
 #define SDXC_IDMAC_DES0_CES	BIT(30) /* card error summary */
 #define SDXC_IDMAC_DES0_OWN	BIT(31) /* 1-idma owns it, 0-host owns it */
 
+#define SDXC_CLK_400K		0
+#define SDXC_CLK_25M		1
+#define SDXC_CLK_50M		2
+#define SDXC_CLK_50M_DDR	3
+
+struct sunxi_mmc_clk_delay {
+	u32 output;
+	u32 sample;
+};
+
 struct sunxi_idma_des {
 	u32	config;
 	u32	buf_size;
@@ -229,6 +239,7 @@
 	struct clk	*clk_mmc;
 	struct clk	*clk_sample;
 	struct clk	*clk_output;
+	const struct sunxi_mmc_clk_delay *clk_delays;
 
 	/* irq */
 	spinlock_t	lock;
@@ -654,25 +665,19 @@
 
 	/* determine delays */
 	if (rate <= 400000) {
-		oclk_dly = 180;
-		sclk_dly = 42;
+		oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
 	} else if (rate <= 25000000) {
-		oclk_dly = 180;
-		sclk_dly = 75;
+		oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+		sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
 	} else if (rate <= 50000000) {
 		if (ios->timing == MMC_TIMING_UHS_DDR50) {
-			oclk_dly = 60;
-			sclk_dly = 120;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
 		} else {
-			oclk_dly = 90;
-			sclk_dly = 150;
+			oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+			sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
 		}
-	} else if (rate <= 100000000) {
-		oclk_dly = 6;
-		sclk_dly = 24;
-	} else if (rate <= 200000000) {
-		oclk_dly = 3;
-		sclk_dly = 12;
 	} else {
 		return -EINVAL;
 	}
@@ -871,6 +876,7 @@
 static const struct of_device_id sunxi_mmc_of_match[] = {
 	{ .compatible = "allwinner,sun4i-a10-mmc", },
 	{ .compatible = "allwinner,sun5i-a13-mmc", },
+	{ .compatible = "allwinner,sun9i-a80-mmc", },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@
 	.hw_reset	 = sunxi_mmc_hw_reset,
 };
 
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
+	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
+	[SDXC_CLK_50M]		= { .output =  90, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+	[SDXC_CLK_400K]		= { .output = 180, .sample = 180 },
+	[SDXC_CLK_25M]		= { .output = 180, .sample =  75 },
+	[SDXC_CLK_50M]		= { .output = 150, .sample = 120 },
+	[SDXC_CLK_50M_DDR]	= { .output =  90, .sample = 120 },
+};
+
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
 				      struct platform_device *pdev)
 {
@@ -895,6 +915,11 @@
 	else
 		host->idma_des_size_bits = 16;
 
+	if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+		host->clk_delays = sun9i_mmc_clk_delays;
+	else
+		host->clk_delays = sunxi_mmc_clk_delays;
+
 	ret = mmc_regulator_get_supply(host->mmc);
 	if (ret) {
 		if (ret != -EPROBE_DEFER)
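The hardcoded delays become per-SoC tables chosen by compatible string, so new SoCs only add a table and a match entry. The selection logic, modeled in plain C with the table values from this patch:

    #include <stdio.h>
    #include <string.h>

    struct clk_delay { unsigned int output, sample; };

    /* Indices mirror SDXC_CLK_400K..SDXC_CLK_50M_DDR above */
    static const struct clk_delay sunxi_delays[4] = {
        { 180, 180 }, { 180, 75 }, { 90, 120 }, { 60, 120 },
    };
    static const struct clk_delay sun9i_delays[4] = {
        { 180, 180 }, { 180, 75 }, { 150, 120 }, { 90, 120 },
    };

    static const struct clk_delay *pick_delays(const char *compatible)
    {
        if (!strcmp(compatible, "allwinner,sun9i-a80-mmc"))
            return sun9i_delays;
        return sunxi_delays;    /* default: sun4i/sun5i values */
    }

    int main(void)
    {
        const struct clk_delay *d = pick_delays("allwinner,sun9i-a80-mmc");

        printf("50M: output=%u sample=%u\n", d[2].output, d[2].sample);
        return 0;
    }
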
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 2426db8..f04445b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -879,7 +879,7 @@
 				      oob_chunk_size);
 
 		/* the last chunk */
-		memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+		memcpy16_toio(&s[i * sparebuf_size],
 			      &d[i * oob_chunk_size],
 			      host->used_oobsize - i * oob_chunk_size);
 	}
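The old source offset was a constant product, so every last-chunk copy read from the same (wrong) place; the fix scales the spare-buffer offset with the chunk index i, matching the i * oob_chunk_size destination offset. A worked example with illustrative sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int sparebuf_size = 64, oob_chunk_size = 16, i = 3;

        printf("old offset: %u\n", oob_chunk_size * sparebuf_size); /* 1024 */
        printf("new offset: %u\n", i * sparebuf_size);              /* 192 */
        return 0;
    }
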
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index f97a58d..e7d333c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -147,6 +147,10 @@
 #define NFC_ECC_MODE		GENMASK(15, 12)
 #define NFC_RANDOM_SEED		GENMASK(30, 16)
 
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf)	((buf)[0] | ((buf)[1] << 8) | \
+					((buf)[2] << 16) | ((buf)[3] << 24))
+
 #define NFC_DEFAULT_TIMEOUT_MS	1000
 
 #define NFC_SRAM_SIZE		1024
@@ -646,15 +650,9 @@
 		offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
 
 		/* Fill OOB data in */
-		if (oob_required) {
-			tmp = 0xffffffff;
-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-				    4);
-		} else {
-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
-				    chip->oob_poi + offset - mtd->writesize,
-				    4);
-		}
+		writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+					    layout->oobfree[i].offset),
+		       nfc->regs + NFC_REG_USER_DATA_BASE);
 
 		chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
 
@@ -784,14 +782,8 @@
 		offset += ecc->size;
 
 		/* Fill OOB data in */
-		if (oob_required) {
-			tmp = 0xffffffff;
-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-				    4);
-		} else {
-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
-				    4);
-		}
+		writel(NFC_BUF_TO_USER_DATA(oob),
+		       nfc->regs + NFC_REG_USER_DATA_BASE);
 
 		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
 		      (1 << 30);
@@ -1389,6 +1381,7 @@
 					node);
 		nand_release(&chip->mtd);
 		sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+		list_del(&chip->node);
 	}
 }
 
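NFC_BUF_TO_USER_DATA() packs four OOB bytes into one little-endian 32-bit word so a single writel() replaces the old memcpy_toio() paths. A quick check of the byte order in plain C:

    #include <stdint.h>
    #include <stdio.h>

    #define NFC_BUF_TO_USER_DATA(buf)   ((buf)[0] | ((buf)[1] << 8) | \
                                        ((buf)[2] << 16) | ((buf)[3] << 24))

    int main(void)
    {
        const uint8_t oob[4] = { 0x11, 0x22, 0x33, 0x44 };

        printf("%08x\n", (uint32_t)NFC_BUF_TO_USER_DATA(oob)); /* 44332211 */
        return 0;
    }
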
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f0..1fc23e4 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@
 		goto bad;
 	}
 
+	if (data_size > ubi->leb_size) {
+		ubi_err(ubi, "bad data_size");
+		goto bad;
+	}
+
 	if (vol_type == UBI_VID_STATIC) {
 		/*
 		 * Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b..d85c197 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		return -ENOSPC;
 	}
 	ubi->rsvd_pebs += reserved_pebs;
 	ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb..eb4489f9 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@
 		if (ubi->corr_peb_count)
 			ubi_err(ubi, "%d PEBs are corrupted and not used",
 				ubi->corr_peb_count);
+		err = -ENOSPC;
 		goto out_free;
 	}
 	ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d18eb60..f184fb5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -298,7 +298,10 @@
 
 config NET_VRF
 	tristate "Virtual Routing and Forwarding (Lite)"
-	depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+	depends on IP_MULTIPLE_TABLES
+	depends on NET_L3_MASTER_DEV
+	depends on IPV6 || IPV6=n
+	depends on IPV6_MULTIPLE_TABLES || IPV6=n
 	---help---
 	  This option enables the support for mapping interfaces into VRF's. The
 	  support enables VRF devices.
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index d7fdea1..20bfb9b 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -237,6 +237,8 @@
 		numsegs;	/* number of segments */
 };
 
+#define ARCNET_LED_NAME_SZ (IFNAMSIZ + 6)
+
 struct arcnet_local {
 	uint8_t config,		/* current value of CONFIG register */
 		timeout,	/* Extended timeout for COM20020 */
@@ -260,6 +262,13 @@
 	/* On preemptive and SMP a lock is needed */
 	spinlock_t lock;
 
+	struct led_trigger *tx_led_trig;
+	char tx_led_trig_name[ARCNET_LED_NAME_SZ];
+	struct led_trigger *recon_led_trig;
+	char recon_led_trig_name[ARCNET_LED_NAME_SZ];
+
+	struct timer_list	timer;
+
 	/*
 	 * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
 	 * which can be used for either sending or receiving.  The new dynamic
@@ -309,6 +318,8 @@
 		int (*reset)(struct net_device *dev, int really_reset);
 		void (*open)(struct net_device *dev);
 		void (*close)(struct net_device *dev);
+		void (*datatrigger) (struct net_device * dev, int enable);
+		void (*recontrigger) (struct net_device * dev, int enable);
 
 		void (*copy_to_card)(struct net_device *dev, int bufnum,
 				     int offset, void *buf, int count);
@@ -319,6 +330,16 @@
 	void __iomem *mem_start;	/* pointer to ioremap'ed MMIO */
 };
 
+enum arcnet_led_event {
+	ARCNET_LED_EVENT_RECON,
+	ARCNET_LED_EVENT_OPEN,
+	ARCNET_LED_EVENT_STOP,
+	ARCNET_LED_EVENT_TX,
+};
+
+void arcnet_led_event(struct net_device *netdev, enum arcnet_led_event event);
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid);
+
 #if ARCNET_DEBUG_MAX & D_SKB
 void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
 #else
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index e41dd36..6ea963e 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -52,6 +52,8 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 
+#include <linux/leds.h>
+
 #include "arcdevice.h"
 #include "com9026.h"
 
@@ -189,6 +191,71 @@
 
 #endif
 
+/* Trigger a LED event in response to an ARCNET device event */
+void arcnet_led_event(struct net_device *dev, enum arcnet_led_event event)
+{
+	struct arcnet_local *lp = netdev_priv(dev);
+	unsigned long led_delay = 350;
+	unsigned long tx_delay = 50;
+
+	switch (event) {
+	case ARCNET_LED_EVENT_RECON:
+		led_trigger_blink_oneshot(lp->recon_led_trig,
+					  &led_delay, &led_delay, 0);
+		break;
+	case ARCNET_LED_EVENT_OPEN:
+		led_trigger_event(lp->tx_led_trig, LED_OFF);
+		led_trigger_event(lp->recon_led_trig, LED_OFF);
+		break;
+	case ARCNET_LED_EVENT_STOP:
+		led_trigger_event(lp->tx_led_trig, LED_OFF);
+		led_trigger_event(lp->recon_led_trig, LED_OFF);
+		break;
+	case ARCNET_LED_EVENT_TX:
+		led_trigger_blink_oneshot(lp->tx_led_trig,
+					  &tx_delay, &tx_delay, 0);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(arcnet_led_event);
+
+static void arcnet_led_release(struct device *gendev, void *res)
+{
+	struct arcnet_local *lp = netdev_priv(to_net_dev(gendev));
+
+	led_trigger_unregister_simple(lp->tx_led_trig);
+	led_trigger_unregister_simple(lp->recon_led_trig);
+}
+
+/* Register ARCNET LED triggers for an arcnet device
+ *
+ * This is normally called from a driver's probe function
+ */
+void devm_arcnet_led_init(struct net_device *netdev, int index, int subid)
+{
+	struct arcnet_local *lp = netdev_priv(netdev);
+	void *res;
+
+	res = devres_alloc(arcnet_led_release, 0, GFP_KERNEL);
+	if (!res) {
+		netdev_err(netdev, "cannot register LED triggers\n");
+		return;
+	}
+
+	snprintf(lp->tx_led_trig_name, sizeof(lp->tx_led_trig_name),
+		 "arc%d-%d-tx", index, subid);
+	snprintf(lp->recon_led_trig_name, sizeof(lp->recon_led_trig_name),
+		 "arc%d-%d-recon", index, subid);
+
+	led_trigger_register_simple(lp->tx_led_trig_name,
+				    &lp->tx_led_trig);
+	led_trigger_register_simple(lp->recon_led_trig_name,
+				    &lp->recon_led_trig);
+
+	devres_add(&netdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_arcnet_led_init);
+
 /* Unregister a protocol driver from the arc_proto_map.  Protocol drivers
  * are responsible for registering themselves, but the unregister routine
  * is pretty generic so we'll do it here.
@@ -314,6 +381,16 @@
 	dev->flags = IFF_BROADCAST;
 }
 
+static void arcnet_timer(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+
+	if (!netif_carrier_ok(dev)) {
+		netif_carrier_on(dev);
+		netdev_info(dev, "link up\n");
+	}
+}
+
 struct net_device *alloc_arcdev(const char *name)
 {
 	struct net_device *dev;
@@ -325,6 +402,9 @@
 		struct arcnet_local *lp = netdev_priv(dev);
 
 		spin_lock_init(&lp->lock);
+		init_timer(&lp->timer);
+		lp->timer.data = (unsigned long) dev;
+		lp->timer.function = arcnet_timer;
 	}
 
 	return dev;
@@ -423,8 +503,11 @@
 	lp->hw.intmask(dev, lp->intmask);
 	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
 
+	netif_carrier_off(dev);
 	netif_start_queue(dev);
+	mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
 
+	arcnet_led_event(dev, ARCNET_LED_EVENT_OPEN);
 	return 0;
 
  out_module_put:
@@ -438,7 +521,11 @@
 {
 	struct arcnet_local *lp = netdev_priv(dev);
 
+	arcnet_led_event(dev, ARCNET_LED_EVENT_STOP);
+	del_timer_sync(&lp->timer);
+
 	netif_stop_queue(dev);
+	netif_carrier_off(dev);
 
 	/* flush TX and disable RX */
 	lp->hw.intmask(dev, 0);
@@ -515,7 +602,7 @@
 	struct ArcProto *proto;
 	int txbuf;
 	unsigned long flags;
-	int freeskb, retval;
+	int retval;
 
 	arc_printk(D_DURING, dev,
 		   "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
@@ -554,15 +641,13 @@
 			 *  the package later - forget about it now
 			 */
 			dev->stats.tx_bytes += skb->len;
-			freeskb = 1;
+			dev_kfree_skb(skb);
 		} else {
 			/* do it the 'split' way */
 			lp->outgoing.proto = proto;
 			lp->outgoing.skb = skb;
 			lp->outgoing.pkt = pkt;
 
-			freeskb = 0;
-
 			if (proto->continue_tx &&
 			    proto->continue_tx(dev, txbuf)) {
 				arc_printk(D_NORMAL, dev,
@@ -574,7 +659,6 @@
 		lp->next_tx = txbuf;
 	} else {
 		retval = NETDEV_TX_BUSY;
-		freeskb = 0;
 	}
 
 	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
@@ -588,10 +672,9 @@
 	arc_printk(D_DEBUG, dev, "%s: %d: %s, status: %x\n",
 		   __FILE__, __LINE__, __func__, lp->hw.status(dev));
 
-	spin_unlock_irqrestore(&lp->lock, flags);
-	if (freeskb)
-		dev_kfree_skb(skb);
+	arcnet_led_event(dev, ARCNET_LED_EVENT_TX);
 
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return retval;		/* no need to try again */
 }
 EXPORT_SYMBOL(arcnet_send_packet);
@@ -843,6 +926,13 @@
 
 			arc_printk(D_RECON, dev, "Network reconfiguration detected (status=%Xh)\n",
 				   status);
+			if (netif_carrier_ok(dev)) {
+				netif_carrier_off(dev);
+				netdev_info(dev, "link down\n");
+			}
+			mod_timer(&lp->timer, jiffies + msecs_to_jiffies(1000));
+
+			arcnet_led_event(dev, ARCNET_LED_EVENT_RECON);
 			/* MYRECON bit is at bit 7 of diagstatus */
 			if (diagstatus & 0x80)
 				arc_printk(D_RECON, dev, "Put out that recon myself\n");
@@ -893,6 +983,7 @@
 			lp->num_recons = lp->network_down = 0;
 
 			arc_printk(D_DURING, dev, "not recon: clearing counters anyway.\n");
+			netif_carrier_on(dev);
 		}
 
 		if (didsomething)
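devm_arcnet_led_init() ties both LED triggers to the net_device through a zero-sized devres record whose release callback does the unregistration, so drivers need no explicit teardown. The devres idiom it follows, sketched in kernel style (the registration body is elided):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static void my_release(struct device *dev, void *res)
    {
        /* undo whatever my_init() registered, e.g.
         * led_trigger_unregister_simple(...) */
    }

    static int my_init(struct device *dev)
    {
        void *res = devres_alloc(my_release, 0, GFP_KERNEL);

        if (!res)
            return -ENOMEM;

        /* ... register resources, e.g. led_trigger_register_simple() ... */

        devres_add(dev, res);   /* undo runs automatically on detach */
        return 0;
    }
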
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index a12bf83..239de38 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -41,6 +41,7 @@
 #include <linux/pci.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/leds.h>
 
 #include "arcdevice.h"
 #include "com20020.h"
@@ -62,12 +63,43 @@
 module_param(clockm, int, 0);
 MODULE_LICENSE("GPL");
 
+static void led_tx_set(struct led_classdev *led_cdev,
+			     enum led_brightness value)
+{
+	struct com20020_dev *card;
+	struct com20020_priv *priv;
+	struct com20020_pci_card_info *ci;
+
+	card = container_of(led_cdev, struct com20020_dev, tx_led);
+
+	priv = card->pci_priv;
+	ci = priv->ci;
+
+	outb(!!value, priv->misc + ci->leds[card->index].green);
+}
+
+static void led_recon_set(struct led_classdev *led_cdev,
+			     enum led_brightness value)
+{
+	struct com20020_dev *card;
+	struct com20020_priv *priv;
+	struct com20020_pci_card_info *ci;
+
+	card = container_of(led_cdev, struct com20020_dev, recon_led);
+
+	priv = card->pci_priv;
+	ci = priv->ci;
+
+	outb(!!value, priv->misc + ci->leds[card->index].red);
+}
+
 static void com20020pci_remove(struct pci_dev *pdev);
 
 static int com20020pci_probe(struct pci_dev *pdev,
 			     const struct pci_device_id *id)
 {
 	struct com20020_pci_card_info *ci;
+	struct com20020_pci_channel_map *mm;
 	struct net_device *dev;
 	struct arcnet_local *lp;
 	struct com20020_priv *priv;
@@ -84,9 +116,22 @@
 
 	ci = (struct com20020_pci_card_info *)id->driver_data;
 	priv->ci = ci;
+	mm = &ci->misc_map;
 
 	INIT_LIST_HEAD(&priv->list_dev);
 
+	if (mm->size) {
+		ioaddr = pci_resource_start(pdev, mm->bar) + mm->offset;
+		r = devm_request_region(&pdev->dev, ioaddr, mm->size,
+					"com20020-pci");
+		if (!r) {
+			pr_err("IO region %xh-%xh already allocated.\n",
+			       ioaddr, ioaddr + mm->size - 1);
+			return -EBUSY;
+		}
+		priv->misc = ioaddr;
+	}
+
 	for (i = 0; i < ci->devcount; i++) {
 		struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i];
 		struct com20020_dev *card;
@@ -96,6 +141,7 @@
 			ret = -ENOMEM;
 			goto out_port;
 		}
+		dev->dev_port = i;
 
 		dev->netdev_ops = &com20020_netdev_ops;
 
@@ -131,6 +177,13 @@
 		lp->timeout = timeout;
 		lp->hw.owner = THIS_MODULE;
 
+		/* Get the dev_id from the PLX rotary encoder */
+		if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15))
+			dev->dev_id = 0xc;
+		dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4;
+
+		snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i);
+
 		if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
 			pr_err("IO address %Xh is empty!\n", ioaddr);
 			ret = -EIO;
@@ -148,14 +201,41 @@
 
 		card->index = i;
 		card->pci_priv = priv;
+		card->tx_led.brightness_set = led_tx_set;
+		card->tx_led.default_trigger = devm_kasprintf(&pdev->dev,
+						GFP_KERNEL, "arc%d-%d-tx",
+						dev->dev_id, i);
+		card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+						"pci:green:tx:%d-%d",
+						dev->dev_id, i);
+
+		card->tx_led.dev = &dev->dev;
+		card->recon_led.brightness_set = led_recon_set;
+		card->recon_led.default_trigger = devm_kasprintf(&pdev->dev,
+						GFP_KERNEL, "arc%d-%d-recon",
+						dev->dev_id, i);
+		card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+						"pci:red:recon:%d-%d",
+						dev->dev_id, i);
+		card->recon_led.dev = &dev->dev;
 		card->dev = dev;
 
+		ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
+		if (ret)
+			goto out_port;
+
+		ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
+		if (ret)
+			goto out_port;
+
 		dev_set_drvdata(&dev->dev, card);
 
 		ret = com20020_found(dev, IRQF_SHARED);
 		if (ret)
 			goto out_port;
 
+		devm_arcnet_led_init(dev, dev->dev_id, i);
+
 		list_add(&card->list, &priv->list_dev);
 	}
 
@@ -234,6 +314,18 @@
 			.size = 0x08,
 		},
 	},
+	.misc_map = {
+		.bar = 2,
+		.offset = 0x10,
+		.size = 0x04,
+	},
+	.leds = {
+		{
+			.green = 0x0,
+			.red = 0x1,
+		},
+	},
+	.rotary = 0x0,
 	.flags = ARC_CAN_10MBIT,
 };
 
@@ -251,6 +343,21 @@
 			.size = 0x08,
 		}
 	},
+	.misc_map = {
+		.bar = 2,
+		.offset = 0x10,
+		.size = 0x04,
+	},
+	.leds = {
+		{
+			.green = 0x0,
+			.red = 0x1,
+		}, {
+			.green = 0x2,
+			.red = 0x3,
+		},
+	},
+	.rotary = 0x0,
 	.flags = ARC_CAN_10MBIT,
 };
 
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index c82f323..13d9ad4 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -118,7 +118,7 @@
 		arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
 	}
 
-	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+	lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
 	/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
 	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
 	arcnet_outb(0x42, ioaddr, COM20020_REG_W_XREG);
@@ -131,11 +131,6 @@
 	}
 	arc_printk(D_INIT_REASONS, dev, "status after reset: %X\n", status);
 
-	/* Enable TX */
-	lp->config |= TXENcfg;
-	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
-	arcnet_outb(arcnet_inb(ioaddr, 8), ioaddr, COM20020_REG_W_XREG);
-
 	arcnet_outb(CFLAGScmd | RESETclear | CONFIGclear,
 		    ioaddr, COM20020_REG_W_COMMAND);
 	status = arcnet_inb(ioaddr, COM20020_REG_R_STATUS);
@@ -169,9 +164,33 @@
 	return 0;
 }
 
+static int com20020_netdev_open(struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct arcnet_local *lp = netdev_priv(dev);
+
+	lp->config |= TXENcfg;
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+
+	return arcnet_open(dev);
+}
+
+static int com20020_netdev_close(struct net_device *dev)
+{
+	int ioaddr = dev->base_addr;
+	struct arcnet_local *lp = netdev_priv(dev);
+
+	arcnet_close(dev);
+
+	/* disable transmitter */
+	lp->config &= ~TXENcfg;
+	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
+	return 0;
+}
+
 const struct net_device_ops com20020_netdev_ops = {
-	.ndo_open	= arcnet_open,
-	.ndo_stop	= arcnet_close,
+	.ndo_open	= com20020_netdev_open,
+	.ndo_stop	= com20020_netdev_close,
 	.ndo_start_xmit = arcnet_send_packet,
 	.ndo_tx_timeout = arcnet_timeout,
 	.ndo_set_mac_address = com20020_set_hwaddr,
@@ -215,7 +234,7 @@
 		arcnet_outb(STARTIOcmd, ioaddr, COM20020_REG_W_COMMAND);
 	}
 
-	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
+	lp->config = (lp->timeout << 3) | (lp->backplane << 2) | SUB_NODE;
 	/* Default 0x38 + register: Node ID */
 	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
 	arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -274,7 +293,7 @@
 		   dev->name, arcnet_inb(ioaddr, COM20020_REG_R_STATUS));
 
 	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
-	lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
+	lp->config |= (lp->timeout << 3) | (lp->backplane << 2);
 	/* power-up defaults */
 	arcnet_outb(lp->config, ioaddr, COM20020_REG_W_CONFIG);
 	arc_printk(D_DEBUG, dev, "%s: %d: %s\n", __FILE__, __LINE__, __func__);
diff --git a/drivers/net/arcnet/com20020.h b/drivers/net/arcnet/com20020.h
index 22a460f..0bcc5d0 100644
--- a/drivers/net/arcnet/com20020.h
+++ b/drivers/net/arcnet/com20020.h
@@ -26,6 +26,7 @@
  */
 #ifndef __COM20020_H
 #define __COM20020_H
+#include <linux/leds.h>
 
 int com20020_check(struct net_device *dev);
 int com20020_found(struct net_device *dev, int shared);
@@ -36,6 +37,11 @@
 
 #define PLX_PCI_MAX_CARDS 2
 
+struct ledoffsets {
+	int green;
+	int red;
+};
+
 struct com20020_pci_channel_map {
 	u32 bar;
 	u32 offset;
@@ -47,6 +53,10 @@
 	int devcount;
 
 	struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+	struct com20020_pci_channel_map misc_map;
+
+	struct ledoffsets leds[PLX_PCI_MAX_CARDS];
+	int rotary;
 
 	unsigned int flags;
 };
@@ -54,12 +64,16 @@
 struct com20020_priv {
 	struct com20020_pci_card_info *ci;
 	struct list_head list_dev;
+	resource_size_t misc;
 };
 
 struct com20020_dev {
 	struct list_head list;
 	struct net_device *dev;
 
+	struct led_classdev tx_led;
+	struct led_classdev recon_led;
+
 	struct com20020_priv *pci_priv;
 	int index;
 };
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 90f2615..d0f23cd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1071,7 +1071,7 @@
 				 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
 #define BOND_ENC_FEATURES	(NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
-				 NETIF_F_TSO)
+				 NETIF_F_ALL_TSO)
 
 static void bond_compute_features(struct bonding *bond)
 {
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 945c095..8b3275d 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,15 +8,6 @@
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
  * file from the main directory of the linux kernel source.
  *
- *
- * Your platform definition file should specify something like:
- *
- * static struct at91_can_data ek_can_data = {
- *	transceiver_switch = sam9263ek_transceiver_switch,
- * };
- *
- * at91_add_device_can(&ek_can_data);
- *
  */
 
 #include <linux/clk.h>
@@ -33,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/platform_data/atmel.h>
 
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
@@ -324,15 +314,6 @@
 	return reg_mid;
 }
 
-/*
- * Swtich transceiver on or off
- */
-static void at91_transceiver_switch(const struct at91_priv *priv, int on)
-{
-	if (priv->pdata && priv->pdata->transceiver_switch)
-		priv->pdata->transceiver_switch(on);
-}
-
 static void at91_setup_mailboxes(struct net_device *dev)
 {
 	struct at91_priv *priv = netdev_priv(dev);
@@ -416,7 +397,6 @@
 
 	at91_set_bittiming(dev);
 	at91_setup_mailboxes(dev);
-	at91_transceiver_switch(priv, 1);
 
 	/* enable chip */
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -444,7 +424,6 @@
 	reg_mr = at91_read(priv, AT91_MR);
 	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
 
-	at91_transceiver_switch(priv, 0);
 	priv->can.state = state;
 }
 
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index e5fac36..131026f 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -87,6 +87,7 @@
 	{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 	{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 	{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 10d8497..d9a42c6 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -601,7 +601,7 @@
 		stats->tx_errors++;
 		if (likely(skb)) {
 			cf->can_id |= CAN_ERR_LOSTARB;
-			cf->data[0] = (alc & 0x1f) >> 8;
+			cf->data[0] = (alc >> 8) & 0x1f;
 		}
 	}
 
@@ -854,4 +854,4 @@
 MODULE_AUTHOR("Peter Chen <xingkongcp@gmail.com>");
 MODULE_AUTHOR("Gerhard Bertelsmann <info@gerhard-bertelsmann.de>");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION(DRV_NAME "CAN driver for Allwinner SoCs (A10/A20)");
+MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20)");
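The arbitration-lost fix corrects a shift/mask order bug: masking with 0x1f first discards the very bits (12:8) that the shift was meant to expose, so data[0] was always zero. A worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int alc = 0x0a00;  /* arbitration lost at bit position 10 */

        printf("old: %u\n", (alc & 0x1f) >> 8); /* 0, for any alc */
        printf("new: %u\n", (alc >> 8) & 0x1f); /* 10 */
        return 0;
    }
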
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9d56515..6f946fe 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -21,10 +21,13 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/of_net.h>
 #include <net/dsa.h>
 #include <linux/ethtool.h>
 #include <linux/if_bridge.h>
 #include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include <net/switchdev.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -264,6 +267,50 @@
 	}
 }
 
+static inline void bcm_sf2_port_intr_enable(struct bcm_sf2_priv *priv,
+					    int port)
+{
+	unsigned int off;
+
+	switch (port) {
+	case 7:
+		off = P7_IRQ_OFF;
+		break;
+	case 0:
+		/* Port 0 interrupts are located on the first bank */
+		intrl2_0_mask_clear(priv, P_IRQ_MASK(P0_IRQ_OFF));
+		return;
+	default:
+		off = P_IRQ_OFF(port);
+		break;
+	}
+
+	intrl2_1_mask_clear(priv, P_IRQ_MASK(off));
+}
+
+static inline void bcm_sf2_port_intr_disable(struct bcm_sf2_priv *priv,
+					     int port)
+{
+	unsigned int off;
+
+	switch (port) {
+	case 7:
+		off = P7_IRQ_OFF;
+		break;
+	case 0:
+		/* Port 0 interrupts are located on the first bank */
+		intrl2_0_mask_set(priv, P_IRQ_MASK(P0_IRQ_OFF));
+		intrl2_0_writel(priv, P_IRQ_MASK(P0_IRQ_OFF), INTRL2_CPU_CLEAR);
+		return;
+	default:
+		off = P_IRQ_OFF(port);
+		break;
+	}
+
+	intrl2_1_mask_set(priv, P_IRQ_MASK(off));
+	intrl2_1_writel(priv, P_IRQ_MASK(off), INTRL2_CPU_CLEAR);
+}
+
 static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 			      struct phy_device *phy)
 {
@@ -280,7 +327,7 @@
 	core_writel(priv, 0, CORE_G_PCTL_PORT(port));
 
 	/* Re-enable the GPHY and re-apply workarounds */
-	if (port == 0 && priv->hw_params.num_gphy == 1) {
+	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) {
 		bcm_sf2_gphy_enable_set(ds, true);
 		if (phy) {
 			/* if phy_stop() has been called before, phy
@@ -297,9 +344,9 @@
 		}
 	}
 
-	/* Enable port 7 interrupts to get notified */
-	if (port == 7)
-		intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF));
+	/* Enable MoCA port interrupts to get notified */
+	if (port == priv->moca_port)
+		bcm_sf2_port_intr_enable(priv, port);
 
 	/* Set this port, and only this one to be in the default VLAN,
 	 * if member of a bridge, restore its membership prior to
@@ -329,12 +376,10 @@
 	if (priv->wol_ports_mask & (1 << port))
 		return;
 
-	if (port == 7) {
-		intrl2_1_mask_set(priv, P_IRQ_MASK(P7_IRQ_OFF));
-		intrl2_1_writel(priv, P_IRQ_MASK(P7_IRQ_OFF), INTRL2_CPU_CLEAR);
-	}
+	if (port == priv->moca_port)
+		bcm_sf2_port_intr_disable(priv, port);
 
-	if (port == 0 && priv->hw_params.num_gphy == 1)
+	if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1)
 		bcm_sf2_gphy_enable_set(ds, false);
 
 	if (dsa_is_cpu_port(ds, port))
@@ -555,6 +600,236 @@
 	return 0;
 }
 
+/* Address Resolution Logic routines */
+static int bcm_sf2_arl_op_wait(struct bcm_sf2_priv *priv)
+{
+	unsigned int timeout = 10;
+	u32 reg;
+
+	do {
+		reg = core_readl(priv, CORE_ARLA_RWCTL);
+		if (!(reg & ARL_STRTDN))
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+static int bcm_sf2_arl_rw_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+	u32 cmd;
+
+	if (op > ARL_RW)
+		return -EINVAL;
+
+	cmd = core_readl(priv, CORE_ARLA_RWCTL);
+	cmd &= ~IVL_SVL_SELECT;
+	cmd |= ARL_STRTDN;
+	if (op)
+		cmd |= ARL_RW;
+	else
+		cmd &= ~ARL_RW;
+	core_writel(priv, cmd, CORE_ARLA_RWCTL);
+
+	return bcm_sf2_arl_op_wait(priv);
+}
+
+static int bcm_sf2_arl_read(struct bcm_sf2_priv *priv, u64 mac,
+			    u16 vid, struct bcm_sf2_arl_entry *ent, u8 *idx,
+			    bool is_valid)
+{
+	unsigned int i;
+	int ret;
+
+	ret = bcm_sf2_arl_op_wait(priv);
+	if (ret)
+		return ret;
+
+	/* Read the 4 bins */
+	for (i = 0; i < 4; i++) {
+		u64 mac_vid;
+		u32 fwd_entry;
+
+		mac_vid = core_readq(priv, CORE_ARLA_MACVID_ENTRY(i));
+		fwd_entry = core_readl(priv, CORE_ARLA_FWD_ENTRY(i));
+		bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+
+		if (ent->is_valid && is_valid) {
+			*idx = i;
+			return 0;
+		}
+
+		/* This is the MAC we just deleted */
+		if (!is_valid && (mac_vid & mac))
+			return 0;
+	}
+
+	return -ENOENT;
+}
+
+static int bcm_sf2_arl_op(struct bcm_sf2_priv *priv, int op, int port,
+			  const unsigned char *addr, u16 vid, bool is_valid)
+{
+	struct bcm_sf2_arl_entry ent;
+	u32 fwd_entry;
+	u64 mac, mac_vid = 0;
+	u8 idx = 0;
+	int ret;
+
+	/* Convert the array into a 64-bit MAC */
+	mac = bcm_sf2_mac_to_u64(addr);
+
+	/* Perform a read for the given MAC and VID */
+	core_writeq(priv, mac, CORE_ARLA_MAC);
+	core_writel(priv, vid, CORE_ARLA_VID);
+
+	/* Issue a read operation for this MAC */
+	ret = bcm_sf2_arl_rw_op(priv, 1);
+	if (ret)
+		return ret;
+
+	ret = bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+	/* If this is a read, just finish now */
+	if (op)
+		return ret;
+
+	/* We could not find a matching MAC, so reset to a new entry */
+	if (ret) {
+		fwd_entry = 0;
+		idx = 0;
+	}
+
+	memset(&ent, 0, sizeof(ent));
+	ent.port = port;
+	ent.is_valid = is_valid;
+	ent.vid = vid;
+	ent.is_static = true;
+	memcpy(ent.mac, addr, ETH_ALEN);
+	bcm_sf2_arl_from_entry(&mac_vid, &fwd_entry, &ent);
+
+	core_writeq(priv, mac_vid, CORE_ARLA_MACVID_ENTRY(idx));
+	core_writel(priv, fwd_entry, CORE_ARLA_FWD_ENTRY(idx));
+
+	ret = bcm_sf2_arl_rw_op(priv, 0);
+	if (ret)
+		return ret;
+
+	/* Re-read the entry to check */
+	return bcm_sf2_arl_read(priv, mac, vid, &ent, &idx, is_valid);
+}
+
+static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
+				  const struct switchdev_obj_port_fdb *fdb,
+				  struct switchdev_trans *trans)
+{
+	/* We do not need to do anything specific here yet */
+	return 0;
+}
+
+static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+			      const struct switchdev_obj_port_fdb *fdb,
+			      struct switchdev_trans *trans)
+{
+	struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+}
+
+static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
+			      const struct switchdev_obj_port_fdb *fdb)
+{
+	struct bcm_sf2_priv *priv = ds_to_priv(ds);
+
+	return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
+}
+
+static int bcm_sf2_arl_search_wait(struct bcm_sf2_priv *priv)
+{
+	unsigned timeout = 1000;
+	u32 reg;
+
+	do {
+		reg = core_readl(priv, CORE_ARLA_SRCH_CTL);
+		if (!(reg & ARLA_SRCH_STDN))
+			return 0;
+
+		if (reg & ARLA_SRCH_VLID)
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+static void bcm_sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
+				  struct bcm_sf2_arl_entry *ent)
+{
+	u64 mac_vid;
+	u32 fwd_entry;
+
+	mac_vid = core_readq(priv, CORE_ARLA_SRCH_RSLT_MACVID(idx));
+	fwd_entry = core_readl(priv, CORE_ARLA_SRCH_RSLT(idx));
+	bcm_sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+static int bcm_sf2_sw_fdb_copy(struct net_device *dev, int port,
+			       const struct bcm_sf2_arl_entry *ent,
+			       struct switchdev_obj_port_fdb *fdb,
+			       int (*cb)(struct switchdev_obj *obj))
+{
+	if (!ent->is_valid)
+		return 0;
+
+	if (port != ent->port)
+		return 0;
+
+	ether_addr_copy(fdb->addr, ent->mac);
+	fdb->vid = ent->vid;
+	fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE;
+
+	return cb(&fdb->obj);
+}
+
+static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port,
+			       struct switchdev_obj_port_fdb *fdb,
+			       int (*cb)(struct switchdev_obj *obj))
+{
+	struct bcm_sf2_priv *priv = ds_to_priv(ds);
+	struct net_device *dev = ds->ports[port];
+	struct bcm_sf2_arl_entry results[2];
+	unsigned int count = 0;
+	int ret;
+
+	/* Start search operation */
+	core_writel(priv, ARLA_SRCH_STDN, CORE_ARLA_SRCH_CTL);
+
+	do {
+		ret = bcm_sf2_arl_search_wait(priv);
+		if (ret)
+			return ret;
+
+		/* Read both entries, then return their values back */
+		bcm_sf2_arl_search_rd(priv, 0, &results[0]);
+		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[0], fdb, cb);
+		if (ret)
+			return ret;
+
+		bcm_sf2_arl_search_rd(priv, 1, &results[1]);
+		ret = bcm_sf2_sw_fdb_copy(dev, port, &results[1], fdb, cb);
+		if (ret)
+			return ret;
+
+		if (!results[0].is_valid && !results[1].is_valid)
+			break;
+
+	} while (count++ < CORE_ARLA_NUM_ENTRIES);
+
+	return 0;
+}
+
 static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id)
 {
 	struct bcm_sf2_priv *priv = dev_id;
@@ -615,6 +890,42 @@
 	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
+static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
+				   struct device_node *dn)
+{
+	struct device_node *port;
+	const char *phy_mode_str;
+	int mode;
+	unsigned int port_num;
+	int ret;
+
+	priv->moca_port = -1;
+
+	for_each_available_child_of_node(dn, port) {
+		if (of_property_read_u32(port, "reg", &port_num))
+			continue;
+
+		/* Internal PHYs get assigned a specific 'phy-mode' property
+		 * value: "internal" to help flag them before MDIO probing
+		 * has completed, since they might be turned off at that
+		 * time
+		 */
+		mode = of_get_phy_mode(port);
+		if (mode < 0) {
+			ret = of_property_read_string(port, "phy-mode",
+						      &phy_mode_str);
+			if (ret < 0)
+				continue;
+
+			if (!strcasecmp(phy_mode_str, "internal"))
+				priv->int_phy_mask |= 1 << port_num;
+		}
+
+		if (mode == PHY_INTERFACE_MODE_MOCA)
+			priv->moca_port = port_num;
+	}
+}
+
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
 	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -633,6 +944,7 @@
 	 * level
 	 */
 	dn = ds->pd->of_node->parent;
+	bcm_sf2_identify_ports(priv, ds->pd->of_node);
 
 	priv->irq0 = irq_of_parse_and_map(dn, 0);
 	priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -913,7 +1225,7 @@
 
 	status->link = 0;
 
-	/* Port 7 is special as we do not get link status from CORE_LNKSTS,
+	/* MoCA port is special as we do not get link status from CORE_LNKSTS,
 	 * which means that we need to force the link at the port override
 	 * level to get the data to flow. We do use what the interrupt handler
 	 * did determine before.
@@ -921,7 +1233,7 @@
 	 * For the other ports, we just force the link status, since this is
 	 * a fixed PHY device.
 	 */
-	if (port == 7) {
+	if (port == priv->moca_port) {
 		status->link = priv->port_sts[port].link;
 		/* For MoCA interfaces, also force a link down notification
 		 * since some version of the user-space daemon (mocad) use
@@ -1076,6 +1388,10 @@
 	.port_join_bridge	= bcm_sf2_sw_br_join,
 	.port_leave_bridge	= bcm_sf2_sw_br_leave,
 	.port_stp_update	= bcm_sf2_sw_br_set_stp_state,
+	.port_fdb_prepare	= bcm_sf2_sw_fdb_prepare,
+	.port_fdb_add		= bcm_sf2_sw_fdb_add,
+	.port_fdb_del		= bcm_sf2_sw_fdb_del,
+	.port_fdb_dump		= bcm_sf2_sw_fdb_dump,
 };
 
 static int __init bcm_sf2_init(void)
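The new ARL helpers all follow the same handshake: write the operands, set the start/done bit, and poll until the hardware clears it. The polling skeleton, as a self-contained sketch with a simulated register:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARL_STRTDN  (1 << 7)

    static uint32_t fake_reg = ARL_STRTDN;  /* simulated ARL control register */

    static uint32_t reg_read(void)          /* core_readl() stand-in */
    {
        uint32_t v = fake_reg;

        fake_reg &= ~ARL_STRTDN;    /* "hardware" completes after one poll */
        return v;
    }

    static int arl_op_wait(void)
    {
        unsigned int timeout = 10;

        do {
            if (!(reg_read() & ARL_STRTDN))
                return 0;   /* start/done bit cleared: operation complete */
        } while (timeout--);

        return -ETIMEDOUT;
    }

    int main(void)
    {
        printf("wait -> %d\n", arl_op_wait());  /* 0 */
        return 0;
    }
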
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 789d7b7..6bba1c9 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -19,6 +19,8 @@
 #include <linux/mutex.h>
 #include <linux/mii.h>
 #include <linux/ethtool.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
 
 #include <net/dsa.h>
 
@@ -50,6 +52,60 @@
 	u32 vlan_ctl_mask;
 };
 
+struct bcm_sf2_arl_entry {
+	u8 port;
+	u8 mac[ETH_ALEN];
+	u16 vid;
+	u8 is_valid:1;
+	u8 is_age:1;
+	u8 is_static:1;
+};
+
+static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst)
+{
+	unsigned int i;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
+}
+
+static inline u64 bcm_sf2_mac_to_u64(const u8 *src)
+{
+	unsigned int i;
+	u64 dst = 0;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
+
+	return dst;
+}
+
+static inline void bcm_sf2_arl_to_entry(struct bcm_sf2_arl_entry *ent,
+					u64 mac_vid, u32 fwd_entry)
+{
+	memset(ent, 0, sizeof(*ent));
+	ent->port = fwd_entry & PORTID_MASK;
+	ent->is_valid = !!(fwd_entry & ARL_VALID);
+	ent->is_age = !!(fwd_entry & ARL_AGE);
+	ent->is_static = !!(fwd_entry & ARL_STATIC);
+	bcm_sf2_mac_from_u64(mac_vid, ent->mac);
+	ent->vid = mac_vid >> VID_SHIFT;
+}
+
+static inline void bcm_sf2_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
+					  const struct bcm_sf2_arl_entry *ent)
+{
+	*mac_vid = bcm_sf2_mac_to_u64(ent->mac);
+	*mac_vid |= (u64)(ent->vid & VID_MASK) << VID_SHIFT;
+	*fwd_entry = ent->port & PORTID_MASK;
+	if (ent->is_valid)
+		*fwd_entry |= ARL_VALID;
+	if (ent->is_static)
+		*fwd_entry |= ARL_STATIC;
+	if (ent->is_age)
+		*fwd_entry |= ARL_AGE;
+}
+
 struct bcm_sf2_priv {
 	/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
 	void __iomem			*core;
@@ -78,6 +134,12 @@
 
 	/* Mask of ports enabled for Wake-on-LAN */
 	u32				wol_ports_mask;
+
+	/* MoCA port location */
+	int				moca_port;
+
+	/* Bitmask of ports having an integrated PHY */
+	unsigned int			int_phy_mask;
 };
 
 struct bcm_sf2_hw_stats {
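bcm_sf2_mac_to_u64() packs the six address bytes into the low 48 bits, most significant byte first, which is the layout the MACVID registers expect alongside the VID in bits 59:48. A round-trip check of the packing in plain C:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_ALEN 6

    static uint64_t mac_to_u64(const uint8_t *src)
    {
        uint64_t dst = 0;
        unsigned int i;

        for (i = 0; i < ETH_ALEN; i++)
            dst |= (uint64_t)src[ETH_ALEN - 1 - i] << (8 * i);
        return dst;
    }

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("%012" PRIx64 "\n", mac_to_u64(mac));    /* 001122334455 */
        return 0;
    }
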
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index fa4e6e7..97780d4 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -231,6 +231,49 @@
 #define CORE_BRCM_HDR_RX_DIS		0x0980
 #define CORE_BRCM_HDR_TX_DIS		0x0988
 
+#define CORE_ARLA_NUM_ENTRIES		1024
+
+#define CORE_ARLA_RWCTL			0x1400
+#define  ARL_RW				(1 << 0)
+#define  IVL_SVL_SELECT			(1 << 6)
+#define  ARL_STRTDN			(1 << 7)
+
+#define CORE_ARLA_MAC			0x1408
+#define CORE_ARLA_VID			0x1420
+#define  ARLA_VIDTAB_INDX_MASK		0x1fff
+
+#define CORE_ARLA_MACVID0		0x1440
+#define  MAC_MASK			0xffffffffff
+#define  VID_SHIFT			48
+#define  VID_MASK			0xfff
+
+#define CORE_ARLA_FWD_ENTRY0		0x1460
+#define  PORTID_MASK			0x1ff
+#define  ARL_CON_SHIFT			9
+#define  ARL_CON_MASK			0x3
+#define  ARL_PRI_SHIFT			11
+#define  ARL_PRI_MASK			0x7
+#define  ARL_AGE			(1 << 14)
+#define  ARL_STATIC			(1 << 15)
+#define  ARL_VALID			(1 << 16)
+
+#define CORE_ARLA_MACVID_ENTRY(x)	(CORE_ARLA_MACVID0 + ((x) * 0x40))
+#define CORE_ARLA_FWD_ENTRY(x)		(CORE_ARLA_FWD_ENTRY0 + ((x) * 0x40))
+
+#define CORE_ARLA_SRCH_CTL		0x1540
+#define  ARLA_SRCH_VLID			(1 << 0)
+#define  IVL_SVL_SELECT			(1 << 6)
+#define  ARLA_SRCH_STDN			(1 << 7)
+
+#define CORE_ARLA_SRCH_ADR		0x1544
+#define  ARLA_SRCH_ADR_VALID		(1 << 15)
+
+#define CORE_ARLA_SRCH_RSLT_0_MACVID	0x1580
+#define CORE_ARLA_SRCH_RSLT_0		0x15a0
+
+#define CORE_ARLA_SRCH_RSLT_MACVID(x)	(CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40))
+#define CORE_ARLA_SRCH_RSLT(x)		(CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40))
+
 #define CORE_MEM_PSM_VDD_CTRL		0x2380
 #define  P_TXQ_PSM_VDD_SHIFT		2
 #define  P_TXQ_PSM_VDD_MASK		0x3
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index c29aebe..9093577 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -26,7 +26,7 @@
 	if (bus == NULL)
 		return -EINVAL;
 
-	return mdiobus_read(bus, ds->pd->sw_addr + addr, reg);
+	return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
 }
 
 #define REG_READ(addr, reg)					\
@@ -47,7 +47,7 @@
 	if (bus == NULL)
 		return -EINVAL;
 
-	return mdiobus_write(bus, ds->pd->sw_addr + addr, reg, val);
+	return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
 }
 
 #define REG_WRITE(addr, reg, val)				\
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 3de2a6d..4bcfd68 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -125,7 +125,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read,
 	.phy_write		= mv88e6xxx_phy_write,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 3e83865..c73121c 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -178,7 +178,6 @@
 	.set_addr		= mv88e6xxx_set_addr_direct,
 	.phy_read		= mv88e6131_phy_read,
 	.phy_write		= mv88e6131_phy_write,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index c2daaf0..2c8eb6f 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -104,7 +104,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read_indirect,
 	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
@@ -114,17 +113,16 @@
 #endif
 	.get_regs_len		= mv88e6xxx_get_regs_len,
 	.get_regs		= mv88e6xxx_get_regs,
-	.port_join_bridge       = mv88e6xxx_join_bridge,
-	.port_leave_bridge      = mv88e6xxx_leave_bridge,
 	.port_stp_update        = mv88e6xxx_port_stp_update,
 	.port_pvid_get		= mv88e6xxx_port_pvid_get,
 	.port_pvid_set		= mv88e6xxx_port_pvid_set,
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
 	.port_vlan_del		= mv88e6xxx_port_vlan_del,
 	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
 	.port_fdb_add		= mv88e6xxx_port_fdb_add,
 	.port_fdb_del		= mv88e6xxx_port_fdb_del,
-	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
+	.port_fdb_dump		= mv88e6xxx_port_fdb_dump,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 1f5129c..cbf4dd8 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -324,7 +324,6 @@
 	.set_addr		= mv88e6xxx_set_addr_indirect,
 	.phy_read		= mv88e6xxx_phy_read_indirect,
 	.phy_write		= mv88e6xxx_phy_write_indirect,
-	.poll_link		= mv88e6xxx_poll_link,
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
@@ -341,17 +340,16 @@
 	.set_eeprom		= mv88e6352_set_eeprom,
 	.get_regs_len		= mv88e6xxx_get_regs_len,
 	.get_regs		= mv88e6xxx_get_regs,
-	.port_join_bridge	= mv88e6xxx_join_bridge,
-	.port_leave_bridge	= mv88e6xxx_leave_bridge,
 	.port_stp_update	= mv88e6xxx_port_stp_update,
 	.port_pvid_get		= mv88e6xxx_port_pvid_get,
 	.port_pvid_set		= mv88e6xxx_port_pvid_set,
 	.port_vlan_add		= mv88e6xxx_port_vlan_add,
 	.port_vlan_del		= mv88e6xxx_port_vlan_del,
 	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_prepare	= mv88e6xxx_port_fdb_prepare,
 	.port_fdb_add		= mv88e6xxx_port_fdb_add,
 	.port_fdb_del		= mv88e6xxx_port_fdb_del,
-	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
+	.port_fdb_dump		= mv88e6xxx_port_fdb_dump,
 };
 
 MODULE_ALIAS("platform:mv88e6172");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 8e9d172..b1b14f5 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
@@ -21,38 +20,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
-#include <linux/seq_file.h>
 #include <net/dsa.h>
+#include <net/switchdev.h>
 #include "mv88e6xxx.h"
 
-/* MDIO bus access can be nested in the case of PHYs connected to the
- * internal MDIO bus of the switch, which is accessed via MDIO bus of
- * the Ethernet interface. Avoid lockdep false positives by using
- * mutex_lock_nested().
- */
-static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
-{
-	int ret;
-
-	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
-	ret = bus->read(bus, addr, regnum);
-	mutex_unlock(&bus->mdio_lock);
-
-	return ret;
-}
-
-static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
-				   u16 val)
-{
-	int ret;
-
-	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
-	ret = bus->write(bus, addr, regnum, val);
-	mutex_unlock(&bus->mdio_lock);
-
-	return ret;
-}
-
 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  * will be directly accessible on some {device address,register address}
@@ -67,7 +38,7 @@
 	int i;
 
 	for (i = 0; i < 16; i++) {
-		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
+		ret = mdiobus_read_nested(bus, sw_addr, SMI_CMD);
 		if (ret < 0)
 			return ret;
 
@@ -83,7 +54,7 @@
 	int ret;
 
 	if (sw_addr == 0)
-		return mv88e6xxx_mdiobus_read(bus, addr, reg);
+		return mdiobus_read_nested(bus, addr, reg);
 
 	/* Wait for the bus to become free. */
 	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -91,8 +62,8 @@
 		return ret;
 
 	/* Transmit the read command. */
-	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
-				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
+	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+				   SMI_CMD_OP_22_READ | (addr << 5) | reg);
 	if (ret < 0)
 		return ret;
 
@@ -102,7 +73,7 @@
 		return ret;
 
 	/* Read the data. */
-	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
+	ret = mdiobus_read_nested(bus, sw_addr, SMI_DATA);
 	if (ret < 0)
 		return ret;
 
@@ -146,7 +117,7 @@
 	int ret;
 
 	if (sw_addr == 0)
-		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
+		return mdiobus_write_nested(bus, addr, reg, val);
 
 	/* Wait for the bus to become free. */
 	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -154,13 +125,13 @@
 		return ret;
 
 	/* Transmit the data to write. */
-	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
+	ret = mdiobus_write_nested(bus, sw_addr, SMI_DATA, val);
 	if (ret < 0)
 		return ret;
 
 	/* Transmit the write command. */
-	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
-				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+	ret = mdiobus_write_nested(bus, sw_addr, SMI_CMD,
+				   SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
 	if (ret < 0)
 		return ret;
 
@@ -388,73 +359,6 @@
 }
 #endif
 
-void mv88e6xxx_poll_link(struct dsa_switch *ds)
-{
-	int i;
-
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		struct net_device *dev;
-		int uninitialized_var(port_status);
-		int pcs_ctrl;
-		int link;
-		int speed;
-		int duplex;
-		int fc;
-
-		dev = ds->ports[i];
-		if (dev == NULL)
-			continue;
-
-		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
-		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
-			continue;
-
-		link = 0;
-		if (dev->flags & IFF_UP) {
-			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
-							 PORT_STATUS);
-			if (port_status < 0)
-				continue;
-
-			link = !!(port_status & PORT_STATUS_LINK);
-		}
-
-		if (!link) {
-			if (netif_carrier_ok(dev)) {
-				netdev_info(dev, "link down\n");
-				netif_carrier_off(dev);
-			}
-			continue;
-		}
-
-		switch (port_status & PORT_STATUS_SPEED_MASK) {
-		case PORT_STATUS_SPEED_10:
-			speed = 10;
-			break;
-		case PORT_STATUS_SPEED_100:
-			speed = 100;
-			break;
-		case PORT_STATUS_SPEED_1000:
-			speed = 1000;
-			break;
-		default:
-			speed = -1;
-			break;
-		}
-		duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
-		fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
-
-		if (!netif_carrier_ok(dev)) {
-			netdev_info(dev,
-				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-				    speed,
-				    duplex ? "full" : "half",
-				    fc ? "en" : "dis");
-			netif_carrier_on(dev);
-		}
-	}
-}
-
 static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -574,7 +478,8 @@
 			   struct phy_device *phydev)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u32 ret, reg;
+	u32 reg;
+	int ret;
 
 	if (!phy_is_pseudo_fixed_link(phydev))
 		return;
@@ -941,13 +846,6 @@
 			       GLOBAL_ATU_OP_BUSY);
 }
 
-/* Must be called with SMI lock held */
-static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
-{
-	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
-			       GLOBAL2_SCRATCH_BUSY);
-}
-
 /* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
 					int regnum)
@@ -1111,11 +1009,6 @@
 	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
 }
 
-static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
-{
-	return _mv88e6xxx_atu_flush(ds, fid, false);
-}
-
 static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
 			       int to_port, bool static_too)
 {
@@ -1177,132 +1070,23 @@
 	return ret;
 }
 
-/* Must be called with smi lock held */
-static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port,
+					u16 output_ports)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid = ps->fid[port];
-	u16 reg = fid << 12;
+	const u16 mask = (1 << ps->num_ports) - 1;
+	int reg;
 
-	if (dsa_is_cpu_port(ds, port))
-		reg |= ds->phys_port_mask;
-	else
-		reg |= (ps->bridge_mask[fid] |
-		       (1 << dsa_upstream_port(ds))) & ~(1 << port);
+	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+	if (reg < 0)
+		return reg;
+
+	reg &= ~mask;
+	reg |= output_ports & mask;
 
 	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
 }
 
-/* Must be called with smi lock held */
-static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int port;
-	u32 mask;
-	int ret;
-
-	mask = ds->phys_port_mask;
-	while (mask) {
-		port = __ffs(mask);
-		mask &= ~(1 << port);
-		if (ps->fid[port] != fid)
-			continue;
-
-		ret = _mv88e6xxx_update_port_config(ds, port);
-		if (ret)
-			return ret;
-	}
-
-	return _mv88e6xxx_flush_fid(ds, fid);
-}
-
-/* Bridge handling functions */
-
-int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret = 0;
-	u32 nmask;
-	int fid;
-
-	/* If the bridge group is not empty, join that group.
-	 * Otherwise create a new group.
-	 */
-	fid = ps->fid[port];
-	nmask = br_port_mask & ~(1 << port);
-	if (nmask)
-		fid = ps->fid[__ffs(nmask)];
-
-	nmask = ps->bridge_mask[fid] | (1 << port);
-	if (nmask != br_port_mask) {
-		netdev_err(ds->ports[port],
-			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
-			   fid, br_port_mask, nmask);
-		return -EINVAL;
-	}
-
-	mutex_lock(&ps->smi_mutex);
-
-	ps->bridge_mask[fid] = br_port_mask;
-
-	if (fid != ps->fid[port]) {
-		clear_bit(ps->fid[port], ps->fid_bitmap);
-		ps->fid[port] = fid;
-		ret = _mv88e6xxx_update_bridge_config(ds, fid);
-	}
-
-	mutex_unlock(&ps->smi_mutex);
-
-	return ret;
-}
-
-int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid, newfid;
-	int ret;
-
-	fid = ps->fid[port];
-
-	if (ps->bridge_mask[fid] != br_port_mask) {
-		netdev_err(ds->ports[port],
-			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
-			   fid, br_port_mask, ps->bridge_mask[fid]);
-		return -EINVAL;
-	}
-
-	/* If the port was the last port of a bridge, we are done.
-	 * Otherwise assign a new fid to the port, and fix up
-	 * the bridge configuration.
-	 */
-	if (br_port_mask == (1 << port))
-		return 0;
-
-	mutex_lock(&ps->smi_mutex);
-
-	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
-	if (unlikely(newfid > ps->num_ports)) {
-		netdev_err(ds->ports[port], "all first %d FIDs are used\n",
-			   ps->num_ports);
-		ret = -ENOSPC;
-		goto unlock;
-	}
-
-	ps->fid[port] = newfid;
-	set_bit(newfid, ps->fid_bitmap);
-	ps->bridge_mask[fid] &= ~(1 << port);
-	ps->bridge_mask[newfid] = 1 << port;
-
-	ret = _mv88e6xxx_update_bridge_config(ds, fid);
-	if (!ret)
-		ret = _mv88e6xxx_update_bridge_config(ds, newfid);
-
-unlock:
-	mutex_unlock(&ps->smi_mutex);
-
-	return ret;
-}
-
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -1438,7 +1222,13 @@
 	return 0;
 }
 
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+{
+	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+				    vid & GLOBAL_VTU_VID_MASK);
+}
+
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
 				  struct mv88e6xxx_vtu_stu_entry *entry)
 {
 	struct mv88e6xxx_vtu_stu_entry next = { 0 };
@@ -1448,11 +1238,6 @@
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
-				   vid & GLOBAL_VTU_VID_MASK);
-	if (ret < 0)
-		return ret;
-
 	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
 	if (ret < 0)
 		return ret;
@@ -1612,6 +1397,7 @@
 	struct mv88e6xxx_vtu_stu_entry vlan = {
 		.valid = true,
 		.vid = vid,
+		.fid = vid, /* We use one FID per VLAN */
 	};
 	int i;
 
@@ -1645,22 +1431,10 @@
 				return err;
 		}
 
-		/* Non-bridged ports and bridge groups use FIDs from 1 to
-		 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
-		 */
-		vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
-					      ps->num_ports + 1);
-		if (unlikely(vlan.fid == VLAN_N_VID)) {
-			pr_err("no more FID available for VLAN %d\n", vid);
-			return -ENOSPC;
-		}
-
 		/* Clear all MAC addresses from the new database */
 		err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
 		if (err)
 			return err;
-
-		set_bit(vlan.fid, ps->fid_bitmap);
 	}
 
 	*entry = vlan;
@@ -1675,7 +1449,12 @@
 	int err;
 
 	mutex_lock(&ps->smi_mutex);
-	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+
+	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+	if (err)
+		goto unlock;
+
+	err = _mv88e6xxx_vtu_getnext(ds, &vlan);
 	if (err)
 		goto unlock;
 
@@ -1700,12 +1479,15 @@
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	struct mv88e6xxx_vtu_stu_entry vlan;
-	bool keep = false;
 	int i, err;
 
 	mutex_lock(&ps->smi_mutex);
 
-	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+	err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+	if (err)
+		goto unlock;
+
+	err = _mv88e6xxx_vtu_getnext(ds, &vlan);
 	if (err)
 		goto unlock;
 
@@ -1718,57 +1500,28 @@
 	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
 	/* keep the VLAN unless all ports are excluded */
+	vlan.valid = false;
 	for (i = 0; i < ps->num_ports; ++i) {
 		if (dsa_is_cpu_port(ds, i))
 			continue;
 
 		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
-			keep = true;
+			vlan.valid = true;
 			break;
 		}
 	}
 
-	vlan.valid = keep;
 	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
 	if (err)
 		goto unlock;
 
 	err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
-	if (err)
-		goto unlock;
-
-	if (!keep)
-		clear_bit(vlan.fid, ps->fid_bitmap);
-
 unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return err;
 }
 
-static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
-				       struct mv88e6xxx_vtu_stu_entry *entry)
-{
-	int err;
-
-	do {
-		if (vid == 4095)
-			return -ENOENT;
-
-		err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
-		if (err)
-			return err;
-
-		if (!entry->valid)
-			return -ENOENT;
-
-		vid = entry->vid;
-	} while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
-		 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
-
-	return 0;
-}
-
 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
 			   unsigned long *ports, unsigned long *untagged)
 {
@@ -1781,7 +1534,12 @@
 		return -ENOENT;
 
 	mutex_lock(&ps->smi_mutex);
-	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+	err = _mv88e6xxx_vtu_vid_write(ds, *vid);
+	if (err)
+		goto unlock;
+
+	err = _mv88e6xxx_vtu_getnext(ds, &next);
+unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	if (err)
@@ -1866,37 +1624,13 @@
 	return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
 }
 
-static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	struct mv88e6xxx_vtu_stu_entry vlan;
-	int err;
-
-	if (vid == 0)
-		return ps->fid[port];
-
-	err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
-	if (err)
-		return err;
-
-	if (vlan.vid == vid)
-		return vlan.fid;
-
-	return -ENOENT;
-}
-
 static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
 				    const unsigned char *addr, u16 vid,
 				    u8 state)
 {
 	struct mv88e6xxx_atu_entry entry = { 0 };
-	int ret;
 
-	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
-	if (ret < 0)
-		return ret;
-
-	entry.fid = ret;
+	entry.fid = vid; /* We use one FID per VLAN */
 	entry.state = state;
 	ether_addr_copy(entry.mac, addr);
 	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
@@ -1907,30 +1641,45 @@
 	return _mv88e6xxx_atu_load(ds, &entry);
 }
 
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid)
+int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+			       const struct switchdev_obj_port_fdb *fdb,
+			       struct switchdev_trans *trans)
 {
-	int state = is_multicast_ether_addr(addr) ?
+	/* We don't use per-port FDB */
+	if (fdb->vid == 0)
+		return -EOPNOTSUPP;
+
+	/* We don't need any dynamic resource from the kernel (yet),
+	 * so skip the prepare phase.
+	 */
+	return 0;
+}
+
+int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+			   const struct switchdev_obj_port_fdb *fdb,
+			   struct switchdev_trans *trans)
+{
+	int state = is_multicast_ether_addr(fdb->addr) ?
 		GLOBAL_ATU_DATA_STATE_MC_STATIC :
 		GLOBAL_ATU_DATA_STATE_UC_STATIC;
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
+	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid)
+			   const struct switchdev_obj_port_fdb *fdb)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
+	ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
 				       GLOBAL_ATU_DATA_STATE_UNUSED);
 	mutex_unlock(&ps->smi_mutex);
 
@@ -1938,7 +1687,6 @@
 }
 
 static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
-				  const unsigned char *addr,
 				  struct mv88e6xxx_atu_entry *entry)
 {
 	struct mv88e6xxx_atu_entry next = { 0 };
@@ -1950,10 +1698,6 @@
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_mac_write(ds, addr);
-	if (ret < 0)
-		return ret;
-
 	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
 	if (ret < 0)
 		return ret;
@@ -1991,51 +1735,69 @@
 	return 0;
 }
 
-/* get next entry for port */
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, u16 *vid, bool *is_static)
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+			    struct switchdev_obj_port_fdb *fdb,
+			    int (*cb)(struct switchdev_obj *obj))
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	struct mv88e6xxx_atu_entry next;
-	u16 fid;
-	int ret;
+	struct mv88e6xxx_vtu_stu_entry vlan = {
+		.vid = GLOBAL_VTU_VID_MASK, /* all ones */
+	};
+	int err;
 
 	mutex_lock(&ps->smi_mutex);
 
-	ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
-	if (ret < 0)
+	err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+	if (err)
 		goto unlock;
-	fid = ret;
 
 	do {
-		if (is_broadcast_ether_addr(addr)) {
-			struct mv88e6xxx_vtu_stu_entry vtu;
+		struct mv88e6xxx_atu_entry addr = {
+			.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+		};
 
-			ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
-			if (ret < 0)
-				goto unlock;
-
-			*vid = vtu.vid;
-			fid = vtu.fid;
-		}
-
-		ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
-		if (ret < 0)
+		err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+		if (err)
 			goto unlock;
 
-		ether_addr_copy(addr, next.mac);
+		if (!vlan.valid)
+			break;
 
-		if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
-			continue;
-	} while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+		err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+		if (err)
+			goto unlock;
 
-	*is_static = next.state == (is_multicast_ether_addr(addr) ?
-				    GLOBAL_ATU_DATA_STATE_MC_STATIC :
-				    GLOBAL_ATU_DATA_STATE_UC_STATIC);
+		do {
+			err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
+			if (err)
+				goto unlock;
+
+			if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+				break;
+
+			if (!addr.trunk && addr.portv_trunkid & BIT(port)) {
+				bool is_static = addr.state ==
+					(is_multicast_ether_addr(addr.mac) ?
+					 GLOBAL_ATU_DATA_STATE_MC_STATIC :
+					 GLOBAL_ATU_DATA_STATE_UC_STATIC);
+
+				fdb->vid = vlan.vid;
+				ether_addr_copy(fdb->addr, addr.mac);
+				fdb->ndm_state = is_static ? NUD_NOARP :
+					NUD_REACHABLE;
+
+				err = cb(&fdb->obj);
+				if (err)
+					goto unlock;
+			}
+		} while (!is_broadcast_ether_addr(addr.mac));
+
+	} while (vlan.vid < GLOBAL_VTU_VID_MASK);
+
 unlock:
 	mutex_unlock(&ps->smi_mutex);
 
-	return ret;
+	return err;
 }
 
 static void mv88e6xxx_bridge_work(struct work_struct *work)
@@ -2057,7 +1819,7 @@
 static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret, fid;
+	int ret;
 	u16 reg;
 
 	mutex_lock(&ps->smi_mutex);
@@ -2125,6 +1887,8 @@
 				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
 			else
 				reg |= PORT_CONTROL_FRAME_MODE_DSA;
+			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+				PORT_CONTROL_FORWARD_UNKNOWN_MC;
 		}
 
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
@@ -2181,7 +1945,7 @@
 			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
 	}
 
-	reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+	reg |= PORT_CONTROL_2_8021Q_SECURE;
 
 	if (reg) {
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
@@ -2274,19 +2038,11 @@
 	if (ret)
 		goto abort;
 
-	/* Port based VLAN map: give each port its own address
-	 * database, allow the CPU port to talk to each of the 'real'
-	 * ports, and allow each of the 'real' ports to only talk to
-	 * the upstream port.
+	/* Port based VLAN map: do not give each port its own address
+	 * database, and allow every port to egress frames on all other ports.
 	 */
-	fid = port + 1;
-	ps->fid[port] = fid;
-	set_bit(fid, ps->fid_bitmap);
-
-	if (!dsa_is_cpu_port(ds, port))
-		ps->bridge_mask[fid] = 1 << port;
-
-	ret = _mv88e6xxx_update_port_config(ds, port);
+	reg = BIT(ps->num_ports) - 1; /* all ports */
+	ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
 	if (ret)
 		goto abort;
 
@@ -2314,273 +2070,9 @@
 	return 0;
 }
 
-static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
-{
-	struct dsa_switch *ds = s->private;
-
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int reg, port;
-
-	seq_puts(s, "    GLOBAL GLOBAL2 ");
-	for (port = 0 ; port < ps->num_ports; port++)
-		seq_printf(s, " %2d  ", port);
-	seq_puts(s, "\n");
-
-	for (reg = 0; reg < 32; reg++) {
-		seq_printf(s, "%2x: ", reg);
-		seq_printf(s, " %4x    %4x  ",
-			   mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
-			   mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
-
-		for (port = 0 ; port < ps->num_ports; port++)
-			seq_printf(s, "%4x ",
-				   mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
-		seq_puts(s, "\n");
-	}
-
-	return 0;
-}
-
-static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mv88e6xxx_regs_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_regs_fops = {
-	.open   = mv88e6xxx_regs_open,
-	.read   = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-	.owner  = THIS_MODULE,
-};
-
-static void mv88e6xxx_atu_show_header(struct seq_file *s)
-{
-	seq_puts(s, "DB   T/P  Vec State Addr\n");
-}
-
-static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
-				     unsigned char *addr, int data)
-{
-	bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
-	int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
-		       GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
-	int state = data & GLOBAL_ATU_DATA_STATE_MASK;
-
-	seq_printf(s, "%03x %5s %10pb   %x   %pM\n",
-		   dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
-}
-
-static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
-				 int dbnum)
-{
-	unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-	unsigned char addr[6];
-	int ret, data, state;
-
-	ret = _mv88e6xxx_atu_mac_write(ds, bcast);
-	if (ret < 0)
-		return ret;
-
-	do {
-		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
-					   dbnum);
-		if (ret < 0)
-			return ret;
-
-		ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
-		if (ret < 0)
-			return ret;
-
-		data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
-		if (data < 0)
-			return data;
-
-		state = data & GLOBAL_ATU_DATA_STATE_MASK;
-		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
-			break;
-		ret = _mv88e6xxx_atu_mac_read(ds, addr);
-		if (ret < 0)
-			return ret;
-		mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
-	} while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
-
-	return 0;
-}
-
-static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
-{
-	struct dsa_switch *ds = s->private;
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int dbnum;
-
-	mv88e6xxx_atu_show_header(s);
-
-	for (dbnum = 0; dbnum < 255; dbnum++) {
-		mutex_lock(&ps->smi_mutex);
-		mv88e6xxx_atu_show_db(s, ds, dbnum);
-		mutex_unlock(&ps->smi_mutex);
-	}
-
-	return 0;
-}
-
-static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mv88e6xxx_atu_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_atu_fops = {
-	.open   = mv88e6xxx_atu_open,
-	.read   = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-	.owner  = THIS_MODULE,
-};
-
-static void mv88e6xxx_stats_show_header(struct seq_file *s,
-					struct mv88e6xxx_priv_state *ps)
-{
-	int port;
-
-	seq_puts(s, "      Statistic       ");
-	for (port = 0 ; port < ps->num_ports; port++)
-		seq_printf(s, "Port %2d  ", port);
-	seq_puts(s, "\n");
-}
-
-static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
-{
-	struct dsa_switch *ds = s->private;
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
-	int port, stat, max_stats;
-	uint64_t value;
-
-	if (have_sw_in_discards(ds))
-		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
-	else
-		max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
-
-	mv88e6xxx_stats_show_header(s, ps);
-
-	mutex_lock(&ps->smi_mutex);
-
-	for (stat = 0; stat < max_stats; stat++) {
-		seq_printf(s, "%19s: ", stats[stat].string);
-		for (port = 0 ; port < ps->num_ports; port++) {
-			_mv88e6xxx_stats_snapshot(ds, port);
-			value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
-							    port);
-			seq_printf(s, "%8llu ", value);
-		}
-		seq_puts(s, "\n");
-	}
-	mutex_unlock(&ps->smi_mutex);
-
-	return 0;
-}
-
-static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mv88e6xxx_stats_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_stats_fops = {
-	.open   = mv88e6xxx_stats_open,
-	.read   = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-	.owner  = THIS_MODULE,
-};
-
-static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
-{
-	struct dsa_switch *ds = s->private;
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int target, ret;
-
-	seq_puts(s, "Target Port\n");
-
-	mutex_lock(&ps->smi_mutex);
-	for (target = 0; target < 32; target++) {
-		ret = _mv88e6xxx_reg_write(
-			ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
-			target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
-		if (ret < 0)
-			goto out;
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
-					  GLOBAL2_DEVICE_MAPPING);
-		seq_printf(s, "  %2d   %2d\n", target,
-			   ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
-	}
-out:
-	mutex_unlock(&ps->smi_mutex);
-
-	return 0;
-}
-
-static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_device_map_fops = {
-	.open   = mv88e6xxx_device_map_open,
-	.read   = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-	.owner  = THIS_MODULE,
-};
-
-static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
-{
-	struct dsa_switch *ds = s->private;
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int reg, ret;
-
-	seq_puts(s, "Register Value\n");
-
-	mutex_lock(&ps->smi_mutex);
-	for (reg = 0; reg < 0x80; reg++) {
-		ret = _mv88e6xxx_reg_write(
-			ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
-			reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
-		if (ret < 0)
-			goto out;
-
-		ret = _mv88e6xxx_scratch_wait(ds);
-		if (ret < 0)
-			goto out;
-
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
-					  GLOBAL2_SCRATCH_MISC);
-		seq_printf(s, "  %2x   %2x\n", reg,
-			   ret & GLOBAL2_SCRATCH_VALUE_MASK);
-	}
-out:
-	mutex_unlock(&ps->smi_mutex);
-
-	return 0;
-}
-
-static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
-}
-
-static const struct file_operations mv88e6xxx_scratch_fops = {
-	.open   = mv88e6xxx_scratch_open,
-	.read   = seq_read,
-	.llseek = no_llseek,
-	.release = single_release,
-	.owner  = THIS_MODULE,
-};
-
 int mv88e6xxx_setup_common(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	char *name;
 
 	mutex_init(&ps->smi_mutex);
 
@@ -2588,24 +2080,6 @@
 
 	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
-	name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
-	ps->dbgfs = debugfs_create_dir(name, NULL);
-	kfree(name);
-
-	debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
-			    &mv88e6xxx_regs_fops);
-
-	debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
-			    &mv88e6xxx_atu_fops);
-
-	debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
-			    &mv88e6xxx_stats_fops);
-
-	debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
-			    &mv88e6xxx_device_map_fops);
-
-	debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
-			    &mv88e6xxx_scratch_fops);
 	return 0;
 }
 
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 1cf3cf8..6f9ed5d 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -402,18 +402,10 @@
 	int		id; /* switch product id */
 	int		num_ports;	/* number of switch ports */
 
-	/* hw bridging */
-
-	DECLARE_BITMAP(fid_bitmap, VLAN_N_VID);	/* FIDs 1 to 4095 available */
-	u16 fid[DSA_MAX_PORTS];			/* per (non-bridged) port FID */
-	u16 bridge_mask[DSA_MAX_PORTS];		/* br groups (indexed by FID) */
-
 	unsigned long port_state_update_mask;
 	u8 port_state[DSA_MAX_PORTS];
 
 	struct work_struct bridge_work;
-
-	struct dentry *dbgfs;
 };
 
 struct mv88e6xxx_hw_stat {
@@ -442,7 +434,6 @@
 int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
 int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
 			    int regnum, u16 val);
-void mv88e6xxx_poll_link(struct dsa_switch *ds);
 void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
 void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
 				 uint64_t *data);
@@ -465,8 +456,6 @@
 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
 int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
 		      struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
-int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
 int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
 int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
@@ -475,12 +464,17 @@
 int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
 int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
 			   unsigned long *ports, unsigned long *untagged);
+int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+			       const struct switchdev_obj_port_fdb *fdb,
+			       struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
+			   const struct switchdev_obj_port_fdb *fdb,
+			   struct switchdev_trans *trans);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
-int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, u16 *vid, bool *is_static);
+			   const struct switchdev_obj_port_fdb *fdb);
+int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+			    struct switchdev_obj_port_fdb *fdb,
+			    int (*cb)(struct switchdev_obj *obj));
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
 			     int reg, int val);
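
mv88e6xxx_port_fdb_dump(), declared just above, leans on the GetNext convention shared by the VTU and the ATU: seed the iterator with an all-ones key (VID 0xfff, or ff:ff:ff:ff:ff:ff for MACs), then issue GetNext until the key wraps back around. Condensed to its control flow (error handling, locking and the switchdev callback plumbing stripped, so this is a sketch rather than the driver code):

/* Walk every VLAN, and inside each VLAN every ATU entry, ending
 * each walk when the all-ones key comes back around.
 */
vlan.vid = GLOBAL_VTU_VID_MASK;			/* seed the VTU walk */
_mv88e6xxx_vtu_vid_write(ds, vlan.vid);

do {
	struct mv88e6xxx_atu_entry addr = {
		.mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};

	_mv88e6xxx_vtu_getnext(ds, &vlan);
	if (!vlan.valid)
		break;

	_mv88e6xxx_atu_mac_write(ds, addr.mac);	/* seed the ATU walk */
	do {
		_mv88e6xxx_atu_getnext(ds, vlan.fid, &addr);
		if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			break;
		/* report (vlan.vid, addr.mac) if port is in the vector */
	} while (!is_broadcast_ether_addr(addr.mac));
} while (vlan.vid < GLOBAL_VTU_VID_MASK);
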
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 815eb94..69fc840 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -147,8 +147,12 @@
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
-	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
+	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST;
+	dev->features	|= NETIF_F_ALL_TSO | NETIF_F_UFO;
 	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+	dev->features	|= NETIF_F_GSO_ENCAP_ALL;
+	dev->hw_features |= dev->features;
+	dev->hw_enc_features |= dev->features;
 	eth_hw_addr_random(dev);
 }
 
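Besides widening the offload set, the dummy change mirrors dev->features into dev->hw_features and dev->hw_enc_features. hw_features is what makes the bits user-togglable; a flag present only in ->features cannot be flipped from userspace. A one-line reminder of the split, using nothing beyond standard netdev fields:

dev->features    |= NETIF_F_SG | NETIF_F_ALL_TSO;	/* enabled now */
dev->hw_features |= dev->features;	/* changeable via ethtool -K */
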
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index edf7225..29c3075 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -64,7 +64,7 @@
 	  should say Y to this option if you wish to use it with Linux.
 
 config MAC8390
-	bool "Macintosh NS 8390 based ethernet cards"
+	tristate "Macintosh NS 8390 based ethernet cards"
 	depends on MAC
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 65cf60f..b928390 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -454,34 +454,22 @@
 MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
 MODULE_LICENSE("GPL");
 
-/* overkill, of course */
-static struct net_device *dev_mac8390[15];
-int init_module(void)
+static struct net_device *dev_mac8390;
+
+int __init init_module(void)
 {
-	int i;
-	for (i = 0; i < 15; i++) {
-		struct net_device *dev = mac8390_probe(-1);
-		if (IS_ERR(dev))
-			break;
-		dev_mac890[i] = dev;
-	}
-	if (!i) {
-		pr_notice("No useable cards found, driver NOT installed.\n");
-		return -ENODEV;
+	dev_mac8390 = mac8390_probe(-1);
+	if (IS_ERR(dev_mac8390)) {
+		pr_warn("mac8390: No card found\n");
+		return PTR_ERR(dev_mac8390);
 	}
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
-	int i;
-	for (i = 0; i < 15; i++) {
-		struct net_device *dev = dev_mac890[i];
-		if (dev) {
-			unregister_netdev(dev);
-			free_netdev(dev);
-		}
-	}
+	unregister_netdev(dev_mac8390);
+	free_netdev(dev_mac8390);
 }
 
 #endif /* MODULE */
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index ae89de7..20bf55d 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1141,8 +1141,6 @@
 	strlcpy(info->version, "revision: 1.0", sizeof(info->version));
 	strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
 	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
-	info->eedump_len = 0;
-	info->regdump_len = sizeof(struct greth_regs);
 }
 
 static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 48ce83e..8d50314 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -847,21 +847,25 @@
 	if (ndev->irq == -ENXIO) {
 		netdev_err(ndev, "No irq resource\n");
 		ret = ndev->irq;
-		goto out;
+		goto out_iounmap;
 	}
 
 	db->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(db->clk)) {
 		ret = PTR_ERR(db->clk);
-		goto out;
+		goto out_iounmap;
 	}
 
-	clk_prepare_enable(db->clk);
+	ret = clk_prepare_enable(db->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
+		goto out_iounmap;
+	}
 
 	ret = sunxi_sram_claim(&pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
-		goto out;
+		goto out_clk_disable_unprepare;
 	}
 
 	db->phy_node = of_parse_phandle(np, "phy", 0);
@@ -910,6 +914,10 @@
 
 out_release_sram:
 	sunxi_sram_release(&pdev->dev);
+out_clk_disable_unprepare:
+	clk_disable_unprepare(db->clk);
+out_iounmap:
+	iounmap(db->membase);
 out:
 	dev_err(db->dev, "not found (%d).\n", ret);
 
@@ -921,8 +929,12 @@
 static int emac_remove(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct emac_board_info *db = netdev_priv(ndev);
 
 	unregister_netdev(ndev);
+	sunxi_sram_release(&pdev->dev);
+	clk_disable_unprepare(db->clk);
+	iounmap(db->membase);
 	free_netdev(ndev);
 
 	dev_dbg(&pdev->dev, "released and freed device\n");
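
The sun4i-emac fix is the standard probe unwinding discipline: one goto label per acquired resource, released in exact reverse order of acquisition, with remove() repeating the same releases. A self-contained sketch of the pattern (the resource pair mirrors the fix above; the probe body is illustrative, not the emac code):

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch of reverse-order unwinding on probe failure. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct clk *clk;
	int ret;

	base = ioremap(0x1000, 0x100);		/* 1st resource, address illustrative */
	if (!base)
		return -ENOMEM;

	clk = clk_get(&pdev->dev, NULL);	/* 2nd resource */
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_iounmap;
	}

	ret = clk_prepare_enable(clk);		/* 3rd resource */
	if (ret)
		goto out_clk_put;

	return 0;

out_clk_put:
	clk_put(clk);
out_iounmap:
	iounmap(base);
	return ret;
}
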
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 98a10d5..66d0b73c 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -661,6 +661,7 @@
 	spin_unlock(&lp->devlock);
 	lance_interrupt(dev->irq, dev);
 }
+EXPORT_SYMBOL_GPL(lance_poll);
 #endif
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index afc62ea..0038709 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -100,7 +100,7 @@
 	  DEPCA series.  (This chipset is better known via the NE2100 cards.)
 
 config HPLANCE
-	bool "HP on-board LANCE support"
+	tristate "HP on-board LANCE support"
 	depends on DIO
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index cb367cc..5330bcb 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -714,7 +714,6 @@
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
 		 aup->mac_id);
-	info->regdump_len = 0;
 }
 
 static void au1000_set_msglevel(struct net_device *dev, u32 value)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 2c063b6..96f485a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -327,9 +327,13 @@
 	pdata->debugfs_xpcs_reg = 0;
 
 	buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+	if (!buf)
+		return;
+
 	pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
 	if (!pdata->xgbe_debugfs) {
 		netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+		kfree(buf);
 		return;
 	}
 
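Two rules underlie the xgbe-debugfs fix: kasprintf() can return NULL and must be checked before use, and since debugfs_create_dir() copies the name into the new dentry, the caller still owns the kasprintf() buffer and must free it on every path. The idiom can be tightened further by freeing unconditionally right after the directory is created:

buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
if (!buf)
	return;			/* nothing allocated, nothing to undo */

pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
kfree(buf);			/* debugfs copied the name */
if (!pdata->xgbe_debugfs)
	netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
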
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index a4473d8..112f1bc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1595,7 +1595,7 @@
 				  packet->rdesc_count, 1);
 
 	/* Make sure ownership is written to the descriptor */
-	dma_wmb();
+	wmb();
 
 	ring->cur = cur_index + 1;
 	if (!packet->skb->xmit_more ||
@@ -1940,84 +1940,31 @@
 static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
 						  unsigned int queue_count)
 {
-	unsigned int q_fifo_size = 0;
-	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+	unsigned int q_fifo_size;
+	unsigned int p_fifo;
 
-	/* Calculate Tx/Rx fifo share per queue */
-	switch (fifo_size) {
-	case 0:
-		q_fifo_size = XGBE_FIFO_SIZE_B(128);
-		break;
-	case 1:
-		q_fifo_size = XGBE_FIFO_SIZE_B(256);
-		break;
-	case 2:
-		q_fifo_size = XGBE_FIFO_SIZE_B(512);
-		break;
-	case 3:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(1);
-		break;
-	case 4:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(2);
-		break;
-	case 5:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(4);
-		break;
-	case 6:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(8);
-		break;
-	case 7:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(16);
-		break;
-	case 8:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(32);
-		break;
-	case 9:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(64);
-		break;
-	case 10:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(128);
-		break;
-	case 11:
-		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
-		break;
-	}
+	/* Calculate the configured fifo size */
+	q_fifo_size = 1 << (fifo_size + 7);
 
-	/* The configured value is not the actual amount of fifo RAM */
+	/* The configured value may not be the actual amount of fifo RAM */
 	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
 
 	q_fifo_size = q_fifo_size / queue_count;
 
-	/* Set the queue fifo size programmable value */
-	if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-	else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
-		p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+	/* Each increment in the queue fifo size represents 256 bytes of
+	 * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+	 * between the queues.
+	 */
+	p_fifo = q_fifo_size / 256;
+	if (p_fifo)
+		p_fifo--;
 
 	return p_fifo;
 }
 
 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	enum xgbe_mtl_fifo_size fifo_size;
+	unsigned int fifo_size;
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
@@ -2033,7 +1980,7 @@
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 {
-	enum xgbe_mtl_fifo_size fifo_size;
+	unsigned int fifo_size;
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2224,7 +2171,7 @@
 
 	default:
 		read_hi = false;
-	};
+	}
 
 	val = XGMAC_IOREAD(pdata, reg_lo);
 
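The rewritten xgbe_calculate_per_queue_fifo() replaces both lookup tables with two closed-form steps: the hardware encodes the total fifo as 256 << fifo_size bytes (hence q_fifo_size = 1 << (fifo_size + 7)), and the per-queue register value counts 256-byte units minus one. A worked example as a standalone program (the XGBE_FIFO_MAX clamp is omitted here for brevity):

#include <stdio.h>

/* Mirror of the new calculation: fifo_size is the encoded hardware
 * capability, queue_count the number of MTL queues sharing it.
 */
static unsigned int per_queue_fifo(unsigned int fifo_size,
				   unsigned int queue_count)
{
	unsigned int q = 1u << (fifo_size + 7);	/* total bytes */
	unsigned int p;

	q /= queue_count;			/* bytes per queue */
	p = q / 256;
	if (p)
		p--;				/* 0 encodes 256 bytes */
	return p;
}

int main(void)
{
	/* fifo_size 9 -> 64 KB total; 4 queues -> 16 KB each, encoded
	 * as 16384/256 - 1 = 63, matching the old FIFO_SIZE_16K (0x3f).
	 */
	printf("%u\n", per_queue_fifo(9, 4));	/* prints 63 */
	return 0;
}
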
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index aae9d5e..cff8940 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -360,6 +360,9 @@
 			}
 		}
 
+		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+			pdata->ext_stats.rx_buffer_unavailable++;
+
 		/* Restart the device on a Fatal Bus Error */
 		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
 			schedule_work(&pdata->restart_work);
@@ -384,7 +387,8 @@
 				/* Read Tx Timestamp to clear interrupt */
 				pdata->tx_tstamp =
 					hw_if->get_tx_tstamp(pdata);
-				schedule_work(&pdata->tx_tstamp_work);
+				queue_work(pdata->dev_workqueue,
+					   &pdata->tx_tstamp_work);
 			}
 		}
 	}
@@ -450,7 +454,7 @@
 {
 	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
 
-	schedule_work(&pdata->service_work);
+	queue_work(pdata->dev_workqueue, &pdata->service_work);
 
 	mod_timer(&pdata->service_timer, jiffies + HZ);
 }
@@ -891,7 +895,7 @@
 	netif_tx_start_all_queues(netdev);
 
 	xgbe_start_timers(pdata);
-	schedule_work(&pdata->service_work);
+	queue_work(pdata->dev_workqueue, &pdata->service_work);
 
 	DBGPR("<--xgbe_start\n");
 
@@ -1807,6 +1811,7 @@
 	struct netdev_queue *txq;
 	int processed = 0;
 	unsigned int tx_packets = 0, tx_bytes = 0;
+	unsigned int cur;
 
 	DBGPR("-->xgbe_tx_poll\n");
 
@@ -1814,10 +1819,11 @@
 	if (!ring)
 		return 0;
 
+	cur = ring->cur;
 	txq = netdev_get_tx_queue(netdev, channel->queue_index);
 
 	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
-	       (ring->dirty != ring->cur)) {
+	       (ring->dirty != cur)) {
 		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
 		rdesc = rdata->rdesc;
 
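The schedule_work() to queue_work() conversions route this device's service and Tx-timestamp work onto pdata->dev_workqueue instead of the shared system workqueue, which lets teardown flush exactly this device's work without waiting on unrelated items. The allocation site is outside these hunks; a plausible lifecycle, with the workqueue-creation call an assumption rather than a quote from the driver:

/* open/probe */
pdata->dev_workqueue =
	create_singlethread_workqueue(netdev_name(pdata->netdev));
if (!pdata->dev_workqueue)
	return -ENOMEM;

/* producers */
queue_work(pdata->dev_workqueue, &pdata->service_work);

/* stop/remove: drain only this device's work */
flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);
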
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 59e090e..6040293 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -179,6 +179,7 @@
 	XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
 	XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
 	XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+	XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
 };
 
 #define XGBE_STATS_COUNT	ARRAY_SIZE(xgbe_gstring_stats)
@@ -187,8 +188,6 @@
 {
 	int i;
 
-	DBGPR("-->%s\n", __func__);
-
 	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < XGBE_STATS_COUNT; i++) {
@@ -198,8 +197,6 @@
 		}
 		break;
 	}
-
-	DBGPR("<--%s\n", __func__);
 }
 
 static void xgbe_get_ethtool_stats(struct net_device *netdev,
@@ -209,23 +206,17 @@
 	u8 *stat;
 	int i;
 
-	DBGPR("-->%s\n", __func__);
-
 	pdata->hw_if.read_mmc_stats(pdata);
 	for (i = 0; i < XGBE_STATS_COUNT; i++) {
 		stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
 		*data++ = *(u64 *)stat;
 	}
-
-	DBGPR("<--%s\n", __func__);
 }
 
 static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
 {
 	int ret;
 
-	DBGPR("-->%s\n", __func__);
-
 	switch (stringset) {
 	case ETH_SS_STATS:
 		ret = XGBE_STATS_COUNT;
@@ -235,8 +226,6 @@
 		ret = -EOPNOTSUPP;
 	}
 
-	DBGPR("<--%s\n", __func__);
-
 	return ret;
 }
 
@@ -245,13 +234,9 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_pauseparam\n");
-
 	pause->autoneg = pdata->phy.pause_autoneg;
 	pause->tx_pause = pdata->phy.tx_pause;
 	pause->rx_pause = pdata->phy.rx_pause;
-
-	DBGPR("<--xgbe_get_pauseparam\n");
 }
 
 static int xgbe_set_pauseparam(struct net_device *netdev,
@@ -260,13 +245,11 @@
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	int ret = 0;
 
-	DBGPR("-->xgbe_set_pauseparam\n");
-
-	DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
-	      pause->autoneg, pause->tx_pause, pause->rx_pause);
-
-	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+	if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+		netdev_err(netdev,
+			   "autoneg disabled, pause autoneg not available\n");
 		return -EINVAL;
+	}
 
 	pdata->phy.pause_autoneg = pause->autoneg;
 	pdata->phy.tx_pause = pause->tx_pause;
@@ -286,8 +269,6 @@
 	if (netif_running(netdev))
 		ret = pdata->phy_if.phy_config_aneg(pdata);
 
-	DBGPR("<--xgbe_set_pauseparam\n");
-
 	return ret;
 }
 
@@ -296,8 +277,6 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_settings\n");
-
 	cmd->phy_address = pdata->phy.address;
 
 	cmd->supported = pdata->phy.supported;
@@ -311,8 +290,6 @@
 	cmd->port = PORT_NONE;
 	cmd->transceiver = XCVR_INTERNAL;
 
-	DBGPR("<--xgbe_get_settings\n");
-
 	return 0;
 }
 
@@ -323,16 +300,20 @@
 	u32 speed;
 	int ret;
 
-	DBGPR("-->xgbe_set_settings\n");
-
 	speed = ethtool_cmd_speed(cmd);
 
-	if (cmd->phy_address != pdata->phy.address)
+	if (cmd->phy_address != pdata->phy.address) {
+		netdev_err(netdev, "invalid phy address %hhu\n",
+			   cmd->phy_address);
 		return -EINVAL;
+	}
 
 	if ((cmd->autoneg != AUTONEG_ENABLE) &&
-	    (cmd->autoneg != AUTONEG_DISABLE))
+	    (cmd->autoneg != AUTONEG_DISABLE)) {
+		netdev_err(netdev, "unsupported autoneg %hhu\n",
+			   cmd->autoneg);
 		return -EINVAL;
+	}
 
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		switch (speed) {
@@ -341,16 +322,27 @@
 		case SPEED_1000:
 			break;
 		default:
+			netdev_err(netdev, "unsupported speed %u\n", speed);
 			return -EINVAL;
 		}
 
-		if (cmd->duplex != DUPLEX_FULL)
+		if (cmd->duplex != DUPLEX_FULL) {
+			netdev_err(netdev, "unsupported duplex %hhu\n",
+				   cmd->duplex);
 			return -EINVAL;
+		}
 	}
 
+	netif_dbg(pdata, link, netdev,
+		  "requested advertisement %#x, phy supported %#x\n",
+		  cmd->advertising, pdata->phy.supported);
+
 	cmd->advertising &= pdata->phy.supported;
-	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+	if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
+		netdev_err(netdev,
+			   "unsupported requested advertisement\n");
 		return -EINVAL;
+	}
 
 	ret = 0;
 	pdata->phy.autoneg = cmd->autoneg;
@@ -366,8 +358,6 @@
 	if (netif_running(netdev))
 		ret = pdata->phy_if.phy_config_aneg(pdata);
 
-	DBGPR("<--xgbe_set_settings\n");
-
 	return ret;
 }
 
@@ -385,7 +375,20 @@
 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
 		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
-	drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static u32 xgbe_get_msglevel(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	return pdata->msg_enable;
+}
+
+static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	pdata->msg_enable = msglevel;
 }
 
 static int xgbe_get_coalesce(struct net_device *netdev,
@@ -393,8 +396,6 @@
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-	DBGPR("-->xgbe_get_coalesce\n");
-
 	memset(ec, 0, sizeof(struct ethtool_coalesce));
 
 	ec->rx_coalesce_usecs = pdata->rx_usecs;
@@ -402,8 +403,6 @@
 
 	ec->tx_max_coalesced_frames = pdata->tx_frames;
 
-	DBGPR("<--xgbe_get_coalesce\n");
-
 	return 0;
 }
 
@@ -415,8 +414,6 @@
 	unsigned int rx_frames, rx_riwt, rx_usecs;
 	unsigned int tx_frames;
 
-	DBGPR("-->xgbe_set_coalesce\n");
-
 	/* Check for not supported parameters  */
 	if ((ec->rx_coalesce_usecs_irq) ||
 	    (ec->rx_max_coalesced_frames_irq) ||
@@ -436,8 +433,10 @@
 	    (ec->rx_max_coalesced_frames_high) ||
 	    (ec->tx_coalesce_usecs_high) ||
 	    (ec->tx_max_coalesced_frames_high) ||
-	    (ec->rate_sample_interval))
+	    (ec->rate_sample_interval)) {
+		netdev_err(netdev, "unsupported coalescing parameter\n");
 		return -EOPNOTSUPP;
+	}
 
 	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
 	rx_usecs = ec->rx_coalesce_usecs;
@@ -449,13 +448,13 @@
 
 	/* Check the bounds of values for Rx */
 	if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
-		netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
-			     hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+		netdev_err(netdev, "rx-usec is limited to %d usecs\n",
+			   hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
 		return -EINVAL;
 	}
 	if (rx_frames > pdata->rx_desc_count) {
-		netdev_alert(netdev, "rx-frames is limited to %d frames\n",
-			     pdata->rx_desc_count);
+		netdev_err(netdev, "rx-frames is limited to %d frames\n",
+			   pdata->rx_desc_count);
 		return -EINVAL;
 	}
 
@@ -463,8 +462,8 @@
 
 	/* Check the bounds of values for Tx */
 	if (tx_frames > pdata->tx_desc_count) {
-		netdev_alert(netdev, "tx-frames is limited to %d frames\n",
-			     pdata->tx_desc_count);
+		netdev_err(netdev, "tx-frames is limited to %d frames\n",
+			   pdata->tx_desc_count);
 		return -EINVAL;
 	}
 
@@ -476,8 +475,6 @@
 	pdata->tx_frames = tx_frames;
 	hw_if->config_tx_coalesce(pdata);
 
-	DBGPR("<--xgbe_set_coalesce\n");
-
 	return 0;
 }
 
@@ -539,8 +536,10 @@
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	unsigned int ret;
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+		netdev_err(netdev, "unsupported hash function\n");
 		return -EOPNOTSUPP;
+	}
 
 	if (indir) {
 		ret = hw_if->set_rss_lookup_table(pdata, indir);
@@ -594,6 +593,8 @@
 	.get_settings = xgbe_get_settings,
 	.set_settings = xgbe_set_settings,
 	.get_drvinfo = xgbe_get_drvinfo,
+	.get_msglevel = xgbe_get_msglevel,
+	.set_msglevel = xgbe_set_msglevel,
 	.get_link = ethtool_op_get_link,
 	.get_coalesce = xgbe_get_coalesce,
 	.set_coalesce = xgbe_set_coalesce,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index e83bd76..7dd8933 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -371,7 +371,7 @@
 	set_bit(XGBE_DOWN, &pdata->dev_state);
 
 	/* Check if we should use ACPI or DT */
-	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+	pdata->use_acpi = dev->of_node ? 0 : 1;
 
 	phy_pdev = xgbe_get_phy_pdev(pdata);
 	if (!phy_pdev) {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 9088c3a..4460580 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1115,8 +1115,7 @@
 	unsigned int reg, link_aneg;
 
 	if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
-		if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
-			netif_carrier_off(pdata->netdev);
+		netif_carrier_off(pdata->netdev);
 
 		pdata->phy.link = 0;
 		goto adjust_link;
@@ -1142,10 +1141,7 @@
 		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
 			clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
 
-		if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
-			set_bit(XGBE_LINK, &pdata->dev_state);
-			netif_carrier_on(pdata->netdev);
-		}
+		netif_carrier_on(pdata->netdev);
 	} else {
 		if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
 			xgbe_check_link_timeout(pdata);
@@ -1156,10 +1152,7 @@
 
 		xgbe_phy_status_aneg(pdata);
 
-		if (test_bit(XGBE_LINK, &pdata->dev_state)) {
-			clear_bit(XGBE_LINK, &pdata->dev_state);
-			netif_carrier_off(pdata->netdev);
-		}
+		netif_carrier_off(pdata->netdev);
 	}
 
 adjust_link:
@@ -1179,8 +1172,7 @@
 	devm_free_irq(pdata->dev, pdata->an_irq, pdata);
 
 	pdata->phy.link = 0;
-	if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
-		netif_carrier_off(pdata->netdev);
+	netif_carrier_off(pdata->netdev);
 
 	xgbe_phy_adjust_link(pdata);
 }
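
Deleting the XGBE_LINK shadow bit is safe because the core carrier helpers already do their own edge detection: netif_carrier_on() and netif_carrier_off() test-and-modify __LINK_STATE_NOCARRIER and only fire a linkwatch event on a real transition, so calling them unconditionally is a no-op when nothing changed. Condensed from the core helper (net/sched/sch_generic.c, paraphrased, so treat the details as approximate):

void netif_carrier_on(struct net_device *dev)
{
	/* no-op unless carrier was actually off */
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
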
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8c9d01e..e234b99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -209,8 +209,6 @@
 #define XGMAC_IOCTL_CONTEXT	2
 
 #define XGBE_FIFO_MAX		81920
-#define XGBE_FIFO_SIZE_B(x)	(x)
-#define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
 #define XGBE_TC_MIN_QUANTUM	10
 
@@ -461,7 +459,6 @@
 
 enum xgbe_state {
 	XGBE_DOWN,
-	XGBE_LINK,
 	XGBE_LINK_INIT,
 	XGBE_LINK_ERR,
 };
@@ -483,20 +480,6 @@
 	XGMAC_INT_STATE_RESTORE,
 };
 
-enum xgbe_mtl_fifo_size {
-	XGMAC_MTL_FIFO_SIZE_256  = 0x00,
-	XGMAC_MTL_FIFO_SIZE_512  = 0x01,
-	XGMAC_MTL_FIFO_SIZE_1K   = 0x03,
-	XGMAC_MTL_FIFO_SIZE_2K   = 0x07,
-	XGMAC_MTL_FIFO_SIZE_4K   = 0x0f,
-	XGMAC_MTL_FIFO_SIZE_8K   = 0x1f,
-	XGMAC_MTL_FIFO_SIZE_16K  = 0x3f,
-	XGMAC_MTL_FIFO_SIZE_32K  = 0x7f,
-	XGMAC_MTL_FIFO_SIZE_64K  = 0xff,
-	XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
-	XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
-};
-
 enum xgbe_speed {
 	XGBE_SPEED_1000 = 0,
 	XGBE_SPEED_2500,
@@ -598,6 +581,7 @@
 struct xgbe_ext_stats {
 	u64 tx_tso_packets;
 	u64 rx_split_header_packets;
+	u64 rx_buffer_unavailable;
 };
 
 struct xgbe_hw_if {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 652f218..33850a0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -461,6 +461,7 @@
 
 static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
 {
+	struct device *dev = &pdata->pdev->dev;
 	u32 value, mc2;
 	u32 intf_ctl, rgmii;
 	u32 icm0, icm2;
@@ -490,7 +491,12 @@
 	default:
 		ENET_INTERFACE_MODE2_SET(&mc2, 2);
 		intf_ctl |= ENET_GHD_MODE;
-		CFG_TXCLK_MUXSEL0_SET(&rgmii, 4);
+
+		if (dev->of_node) {
+			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
+			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
+		}
+
 		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
 		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
 		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index ff05bbc..6dee73c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -144,6 +144,7 @@
 #define CFG_BYPASS_UNISEC_RX		BIT(1)
 #define CFG_CLE_BYPASS_EN0		BIT(31)
 #define CFG_TXCLK_MUXSEL0_SET(dst, val)	xgene_set_bits(dst, val, 29, 3)
+#define CFG_RXCLK_MUXSEL0_SET(dst, val)	xgene_set_bits(dst, val, 26, 3)
 
 #define CFG_CLE_IP_PROTOCOL0_SET(dst, val)	xgene_set_bits(dst, val, 16, 2)
 #define CFG_CLE_DSTQID0_SET(dst, val)		xgene_set_bits(dst, val, 0, 12)
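
The new CFG_RXCLK_MUXSEL0_SET mirrors its Tx counterpart: both go through xgene_set_bits(dst, val, shift, len), patching a 3-bit field (bit 26 for Rx, bit 29 for Tx) in the RGMII control register. The helper itself is not in this hunk; a hypothetical equivalent with the masking semantics those call sites imply:

/* Hypothetical stand-in for xgene_set_bits(): replace the
 * 'len'-bit field of *dst at bit position 'shift' with 'val'.
 */
static inline void example_set_bits(u32 *dst, u32 val, u32 shift, u32 len)
{
	u32 mask = GENMASK(shift + len - 1, shift);

	*dst = (*dst & ~mask) | ((val << shift) & mask);
}
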
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 6b1846d..ce10687 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1118,6 +1118,47 @@
 	return ret;
 }
 
+static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	int delay, ret;
+
+	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
+	if (ret) {
+		pdata->tx_delay = 4;
+		return 0;
+	}
+
+	if (delay < 0 || delay > 7) {
+		dev_err(dev, "Invalid tx-delay specified\n");
+		return -EINVAL;
+	}
+
+	pdata->tx_delay = delay;
+
+	return 0;
+}
+
+static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
+{
+	struct device *dev = &pdata->pdev->dev;
+	int delay, ret;
+
+	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
+	if (ret) {
+		pdata->rx_delay = 2;
+		return 0;
+	}
+
+	if (delay < 0 || delay > 7) {
+		dev_err(dev, "Invalid rx-delay specified\n");
+		return -EINVAL;
+	}
+
+	pdata->rx_delay = delay;
+
+	return 0;
+}
 
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
@@ -1194,6 +1235,14 @@
 		return -ENODEV;
 	}
 
+	ret = xgene_get_tx_delay(pdata);
+	if (ret)
+		return ret;
+
+	ret = xgene_get_rx_delay(pdata);
+	if (ret)
+		return ret;
+
 	ret = platform_get_irq(pdev, 0);
 	if (ret <= 0) {
 		dev_err(dev, "Unable to get ENET Rx IRQ\n");
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index ff89a5d..a6e56b8 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -184,6 +184,8 @@
 	u8 bp_bufnum;
 	u16 ring_num;
 	u32 mss;
+	u8 tx_delay;
+	u8 rx_delay;
 };
 
 struct xgene_indirect_ctl {
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index d19a41b..3107129 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -51,7 +51,7 @@
 	  will be called bmac.
 
 config MACMACE
-	bool "Macintosh (AV) onboard MACE ethernet"
+	tristate "Macintosh (AV) onboard MACE ethernet"
 	depends on MAC
 	select CRC32
 	---help---
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 48694c2..872b7ab 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -233,10 +233,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = 0;
-	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = atl1c_get_regs_len(netdev);
-	drvinfo->eedump_len = atl1c_get_eeprom_len(netdev);
 }
 
 static void atl1c_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 1be072f..8e3dbd4 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -316,10 +316,6 @@
 	strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = 0;
-	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = atl1e_get_regs_len(netdev);
-	drvinfo->eedump_len = atl1e_get_eeprom_len(netdev);
 }
 
 static void atl1e_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index eca1d11..529bca7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3388,7 +3388,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
 }
 
 static void atl1_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 46a5353..8f76f45 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -2030,10 +2030,6 @@
 	strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = 0;
-	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = atl2_get_regs_len(netdev);
-	drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
 }
 
 static void atl2_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e930aa9..67a7d52 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -170,4 +170,23 @@
 	  Broadcom BCM7xxx Set Top Box family chipset using an internal
 	  Ethernet switch.
 
+config BNXT
+	tristate "Broadcom NetXtreme-C/E support"
+	depends on PCI
+	select FW_LOADER
+	select LIBCRC32C
+	---help---
+	  This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
+	  Ethernet cards.  To compile this driver as a module, choose M here:
+	  the module will be called bnxt_en.  This is recommended.
+
+config BNXT_SRIOV
+	bool "Broadcom NetXtreme-C/E SR-IOV support"
+	depends on BNXT && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support in the NetXtreme-C/E products. This
+	  allows for virtual function acceleration in virtual environments.
+
 endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index e2a958a..00584d7 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -12,3 +12,4 @@
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BGMAC) += bgmac.o
 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
+obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7f2cc3..8b1929e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1333,7 +1333,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = BCM_ENET_STATS_LEN;
 }
 
 static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -2049,7 +2048,7 @@
 
 	for (i = 0; i < priv->num_ports; i++) {
 		struct bcm63xx_enetsw_port *port;
-		int val, j, up, advertise, lpa, lpa2, speed, duplex, media;
+		int val, j, up, advertise, lpa, speed, duplex, media;
 		int external_phy = bcm_enet_port_is_rgmii(i);
 		u8 override;
 
@@ -2092,22 +2091,27 @@
 		lpa = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
 					   MII_LPA);
 
-		lpa2 = bcmenet_sw_mdio_read(priv, external_phy, port->phy_id,
-					    MII_STAT1000);
-
 		/* figure out media and duplex from advertise and LPA values */
 		media = mii_nway_result(lpa & advertise);
 		duplex = (media & ADVERTISE_FULL) ? 1 : 0;
-		if (lpa2 & LPA_1000FULL)
-			duplex = 1;
 
-		if (lpa2 & (LPA_1000FULL | LPA_1000HALF))
-			speed = 1000;
-		else {
-			if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
-				speed = 100;
-			else
-				speed = 10;
+		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF))
+			speed = 100;
+		else
+			speed = 10;
+
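+		/* Extended status (BMSR_ESTATEN) implies a gigabit-capable
+		 * PHY, so the 1000BASE-T registers are checked as well.
+		 */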
+		if (val & BMSR_ESTATEN) {
+			advertise = bcmenet_sw_mdio_read(priv, external_phy,
+						port->phy_id, MII_CTRL1000);
+
+			lpa = bcmenet_sw_mdio_read(priv, external_phy,
+						port->phy_id, MII_STAT1000);
+
+			if (advertise & (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+					&& lpa & (LPA_1000FULL | LPA_1000HALF)) {
+				speed = 1000;
+				duplex = (lpa & LPA_1000FULL);
+			}
 		}
 
 		dev_info(&priv->pdev->dev,
@@ -2597,7 +2601,6 @@
 	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
 	strncpy(drvinfo->fw_version, "N/A", 32);
 	strncpy(drvinfo->bus_info, "bcm63xx", 32);
-	drvinfo->n_stats = BCM_ENETSW_STATS_LEN;
 }
 
 static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f1b5364..8581063 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -287,7 +287,6 @@
 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
 	strlcpy(info->version, "0.1", sizeof(info->version));
 	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
-	info->n_stats = BCM_SYSPORT_STATS_LEN;
 }
 
 static u32 bcm_sysport_get_msglvl(struct net_device *dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2b66ef3..8fc3f3c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -813,6 +813,46 @@
 }
 
 static void
+bnx2_free_stats_blk(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->status_blk) {
+		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+				  bp->status_blk,
+				  bp->status_blk_mapping);
+		bp->status_blk = NULL;
+		bp->stats_blk = NULL;
+	}
+}
+
+static int
+bnx2_alloc_stats_blk(struct net_device *dev)
+{
+	int status_blk_size;
+	void *status_blk;
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* Combine status and statistics blocks into one allocation. */
+	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+	if (bp->flags & BNX2_FLAG_MSIX_CAP)
+		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+						 BNX2_SBLK_MSIX_ALIGN_SIZE);
+	bp->status_stats_size = status_blk_size +
+				sizeof(struct statistics_block);
+	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					 &bp->status_blk_mapping, GFP_KERNEL);
+	if (status_blk == NULL)
+		return -ENOMEM;
+
+	bp->status_blk = status_blk;
+	bp->stats_blk = status_blk + status_blk_size;
+	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+	return 0;
+}
+
+static void
 bnx2_free_mem(struct bnx2 *bp)
 {
 	int i;
@@ -829,37 +869,19 @@
 			bp->ctx_blk[i] = NULL;
 		}
 	}
-	if (bnapi->status_blk.msi) {
-		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
-				  bnapi->status_blk.msi,
-				  bp->status_blk_mapping);
+
+	if (bnapi->status_blk.msi)
 		bnapi->status_blk.msi = NULL;
-		bp->stats_blk = NULL;
-	}
 }
 
 static int
 bnx2_alloc_mem(struct bnx2 *bp)
 {
-	int i, status_blk_size, err;
+	int i, err;
 	struct bnx2_napi *bnapi;
-	void *status_blk;
-
-	/* Combine status and statistics blocks into one allocation. */
-	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
-	if (bp->flags & BNX2_FLAG_MSIX_CAP)
-		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
-						 BNX2_SBLK_MSIX_ALIGN_SIZE);
-	bp->status_stats_size = status_blk_size +
-				sizeof(struct statistics_block);
-
-	status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
-					 &bp->status_blk_mapping, GFP_KERNEL);
-	if (status_blk == NULL)
-		goto alloc_mem_err;
 
 	bnapi = &bp->bnx2_napi[0];
-	bnapi->status_blk.msi = status_blk;
+	bnapi->status_blk.msi = bp->status_blk;
 	bnapi->hw_tx_cons_ptr =
 		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
 	bnapi->hw_rx_cons_ptr =
@@ -870,7 +892,7 @@
 
 			bnapi = &bp->bnx2_napi[i];
 
-			sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
 			bnapi->status_blk.msix = sblk;
 			bnapi->hw_tx_cons_ptr =
 				&sblk->status_tx_quick_consumer_index;
@@ -880,10 +902,6 @@
 		}
 	}
 
-	bp->stats_blk = status_blk + status_blk_size;
-
-	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
-
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
 		bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
 		if (bp->ctx_pages == 0)
@@ -8330,6 +8348,11 @@
 
 	bp->phy_addr = 1;
 
+	/* allocate stats_blk */
+	rc = bnx2_alloc_stats_blk(dev);
+	if (rc)
+		goto err_out_unmap;
+
 	/* Disable WOL support if we are running on a SERDES chip. */
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
 		bnx2_get_5709_media(bp);
@@ -8453,6 +8476,8 @@
 	pci_disable_device(pdev);
 
 err_out:
+	kfree(bp->temp_stats_blk);
+
 	return rc;
 }
 
@@ -8586,6 +8611,7 @@
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 err_free:
+	bnx2_free_stats_blk(dev);
 	free_netdev(dev);
 	return rc;
 }
@@ -8603,6 +8629,7 @@
 
 	pci_iounmap(bp->pdev, bp->regview);
 
+	bnx2_free_stats_blk(dev);
 	kfree(bp->temp_stats_blk);
 
 	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f92f76c..380234d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6928,6 +6928,7 @@
 
 	dma_addr_t		status_blk_mapping;
 
+	void *status_blk;
 	struct statistics_block	*stats_blk;
 	struct statistics_block	*temp_stats_blk;
 	dma_addr_t		stats_blk_mapping;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index aeb7ce6..d84efcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1090,10 +1090,6 @@
 	bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
 
 	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
-	info->n_stats = BNX2X_NUM_STATS;
-	info->testinfo_len = BNX2X_NUM_TESTS(bp);
-	info->eedump_len = bp->common.flash_size;
-	info->regdump_len = bnx2x_get_regs_len(dev);
 }
 
 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3351,6 +3347,13 @@
 			udp_rss_requested = 0;
 		else
 			return -EINVAL;
+
+		if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+			DP(BNX2X_MSG_ETHTOOL,
+			   "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+			return -EINVAL;
+		}
+
 		if ((info->flow_type == UDP_V4_FLOW) &&
 		    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
 			bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
new file mode 100644
index 0000000..97e78e2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_BNXT) += bnxt_en.o
+
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
new file mode 100644
index 0000000..6c2e0c6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -0,0 +1,5728 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+#include <linux/bitmap.h>
+#include <linux/cpu_rmap.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#define BNXT_TX_TIMEOUT		(5 * HZ)
+
+static const char version[] =
+	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
+#define BNXT_RX_COPY_THRESH 256
+
+#define BNXT_TX_PUSH_THRESH 92
+
+enum board_idx {
+	BCM57302,
+	BCM57304,
+	BCM57404,
+	BCM57406,
+	BCM57304_VF,
+	BCM57404_VF,
+};
+
+/* indexed by enum above */
+static const struct {
+	char *name;
+} board_info[] = {
+	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
+	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
+	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+};
+
+static const struct pci_device_id bnxt_pci_tbl[] = {
+	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
+	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
+	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+#ifdef CONFIG_BNXT_SRIOV
+	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
+	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+#endif
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+static const u16 bnxt_vf_req_snif[] = {
+	HWRM_FUNC_CFG,
+	HWRM_PORT_PHY_QCFG,
+	HWRM_CFA_L2_FILTER_ALLOC,
+};
+
+static bool bnxt_vf_pciid(enum board_idx idx)
+{
+	return (idx == BCM57304_VF || idx == BCM57404_VF);
+}
+
+#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
+#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
+
+#define BNXT_CP_DB_REARM(db, raw_cons)					\
+		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB(db, raw_cons)					\
+		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
+
+#define BNXT_CP_DB_IRQ_DIS(db)						\
+		writel(DB_CP_IRQ_DIS_FLAGS, db)
+
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+	/* Tell compiler to fetch tx indices from memory. */
+	barrier();
+
+	return bp->tx_ring_size -
+		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
+static const u16 bnxt_lhint_arr[] = {
+	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
+	TX_BD_FLAGS_LHINT_512_TO_1023,
+	TX_BD_FLAGS_LHINT_1024_TO_2047,
+	TX_BD_FLAGS_LHINT_1024_TO_2047,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
+};
+
+static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct tx_bd *txbd;
+	struct tx_bd_ext *txbd1;
+	struct netdev_queue *txq;
+	int i;
+	dma_addr_t mapping;
+	unsigned int length, pad = 0;
+	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
+	u16 prod, last_frag;
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_sw_tx_bd *tx_buf;
+
+	i = skb_get_queue_mapping(skb);
+	if (unlikely(i >= bp->tx_nr_rings)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	bnapi = bp->bnapi[i];
+	txr = &bnapi->tx_ring;
+	txq = netdev_get_tx_queue(dev, i);
+	prod = txr->tx_prod;
+
+	free_size = bnxt_tx_avail(bp, txr);
+	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
+		netif_tx_stop_queue(txq);
+		return NETDEV_TX_BUSY;
+	}
+
+	length = skb->len;
+	len = skb_headlen(skb);
+	last_frag = skb_shinfo(skb)->nr_frags;
+
+	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+	txbd->tx_bd_opaque = prod;
+
+	tx_buf = &txr->tx_buf_ring[prod];
+	tx_buf->skb = skb;
+	tx_buf->nr_frags = last_frag;
+
+	vlan_tag_flags = 0;
+	cfa_action = 0;
+	if (skb_vlan_tag_present(skb)) {
+		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
+				 skb_vlan_tag_get(skb);
+		/* Currently supports 802.1Q and 802.1AD VLAN offloads;
+		 * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
+		 */
+		if (skb->vlan_proto == htons(ETH_P_8021Q))
+			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
+	}
+
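+	/* Use the TX push mode only when the ring is completely empty and
+	 * the packet is small enough to fit in the push buffer.
+	 */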
+	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
+		struct tx_push_bd *push = txr->tx_push;
+		struct tx_bd *tx_push = &push->txbd1;
+		struct tx_bd_ext *tx_push1 = &push->txbd2;
+		void *pdata = tx_push1 + 1;
+		int j;
+
+		/* Set COAL_NOW to be ready quickly for the next push */
+		tx_push->tx_bd_len_flags_type =
+			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
+					TX_BD_TYPE_LONG_TX_BD |
+					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
+					TX_BD_FLAGS_COAL_NOW |
+					TX_BD_FLAGS_PACKET_END |
+					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
+
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			tx_push1->tx_bd_hsize_lflags =
+					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+		else
+			tx_push1->tx_bd_hsize_lflags = 0;
+
+		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+
+		skb_copy_from_linear_data(skb, pdata, len);
+		pdata += len;
+		for (j = 0; j < last_frag; j++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
+			void *fptr;
+
+			fptr = skb_frag_address_safe(frag);
+			if (!fptr)
+				goto normal_tx;
+
+			memcpy(pdata, fptr, skb_frag_size(frag));
+			pdata += skb_frag_size(frag);
+		}
+
+		memcpy(txbd, tx_push, sizeof(*txbd));
+		prod = NEXT_TX(prod);
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+		memcpy(txbd, tx_push1, sizeof(*txbd));
+		prod = NEXT_TX(prod);
+		push->doorbell =
+			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
+		txr->tx_prod = prod;
+
+		netdev_tx_sent_queue(txq, skb->len);
+
+		__iowrite64_copy(txr->tx_doorbell, push,
+				 (length + sizeof(*push) + 8) / 8);
+
+		tx_buf->is_push = 1;
+
+		goto tx_done;
+	}
+
+normal_tx:
+	if (length < BNXT_MIN_PKT_SIZE) {
+		pad = BNXT_MIN_PKT_SIZE - length;
+		if (skb_pad(skb, pad)) {
+			/* SKB already freed. */
+			tx_buf->skb = NULL;
+			return NETDEV_TX_OK;
+		}
+		length = BNXT_MIN_PKT_SIZE;
+	}
+
+	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
+		dev_kfree_skb_any(skb);
+		tx_buf->skb = NULL;
+		return NETDEV_TX_OK;
+	}
+
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
+	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
+
+	txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+	prod = NEXT_TX(prod);
+	txbd1 = (struct tx_bd_ext *)
+		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+	txbd1->tx_bd_hsize_lflags = 0;
+	if (skb_is_gso(skb)) {
+		u32 hdr_len;
+
+		if (skb->encapsulation)
+			hdr_len = skb_inner_network_offset(skb) +
+				skb_inner_network_header_len(skb) +
+				inner_tcp_hdrlen(skb);
+		else
+			hdr_len = skb_transport_offset(skb) +
+				tcp_hdrlen(skb);
+
+		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
+					TX_BD_FLAGS_T_IPID |
+					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
+		length = skb_shinfo(skb)->gso_size;
+		txbd1->tx_bd_mss = cpu_to_le32(length);
+		length += hdr_len;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		txbd1->tx_bd_hsize_lflags =
+			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
+		txbd1->tx_bd_mss = 0;
+	}
+
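+	/* Convert the length to 512-byte units to index the LHINT array. */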
+	length >>= 9;
+	flags |= bnxt_lhint_arr[length];
+	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+
+	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
+	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+	for (i = 0; i < last_frag; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		prod = NEXT_TX(prod);
+		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+		len = skb_frag_size(frag);
+		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
+					   DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
+			goto tx_dma_error;
+
+		tx_buf = &txr->tx_buf_ring[prod];
+		dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+		txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+		flags = len << TX_BD_LEN_SHIFT;
+		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+	}
+
+	flags &= ~TX_BD_LEN;
+	txbd->tx_bd_len_flags_type =
+		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
+			    TX_BD_FLAGS_PACKET_END);
+
+	netdev_tx_sent_queue(txq, skb->len);
+
+	/* Sync BD data before updating doorbell */
+	wmb();
+
+	prod = NEXT_TX(prod);
+	txr->tx_prod = prod;
+
+	writel(DB_KEY_TX | prod, txr->tx_doorbell);
+	writel(DB_KEY_TX | prod, txr->tx_doorbell);
+
+tx_done:
+
+	mmiowb();
+
+	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in bnxt_tx_avail() below, because in
+		 * bnxt_tx_int(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
+			netif_tx_wake_queue(txq);
+	}
+	return NETDEV_TX_OK;
+
+tx_dma_error:
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	tx_buf = &txr->tx_buf_ring[prod];
+	tx_buf->skb = NULL;
+	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+	prod = NEXT_TX(prod);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX(prod);
+		tx_buf = &txr->tx_buf_ring[prod];
+		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+	struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+	int index = bnapi->index;
+	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+	u16 cons = txr->tx_cons;
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+	unsigned int tx_bytes = 0;
+
+	for (i = 0; i < nr_pkts; i++) {
+		struct bnxt_sw_tx_bd *tx_buf;
+		struct sk_buff *skb;
+		int j, last;
+
+		tx_buf = &txr->tx_buf_ring[cons];
+		cons = NEXT_TX(cons);
+		skb = tx_buf->skb;
+		tx_buf->skb = NULL;
+
+		if (tx_buf->is_push) {
+			tx_buf->is_push = 0;
+			goto next_tx_int;
+		}
+
+		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+		last = tx_buf->nr_frags;
+
+		for (j = 0; j < last; j++) {
+			cons = NEXT_TX(cons);
+			tx_buf = &txr->tx_buf_ring[cons];
+			dma_unmap_page(
+				&pdev->dev,
+				dma_unmap_addr(tx_buf, mapping),
+				skb_frag_size(&skb_shinfo(skb)->frags[j]),
+				PCI_DMA_TODEVICE);
+		}
+
+next_tx_int:
+		cons = NEXT_TX(cons);
+
+		tx_bytes += skb->len;
+		dev_kfree_skb_any(skb);
+	}
+
+	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
+	txr->tx_cons = cons;
+
+	/* Need to make the tx_cons update visible to bnxt_start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnxt_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
+	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
+		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+}
+
+static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+				       gfp_t gfp)
+{
+	u8 *data;
+	struct pci_dev *pdev = bp->pdev;
+
+	data = kmalloc(bp->rx_buf_size, gfp);
+	if (!data)
+		return NULL;
+
+	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
+				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+
+	if (dma_mapping_error(&pdev->dev, *mapping)) {
+		kfree(data);
+		data = NULL;
+	}
+	return data;
+}
+
+static inline int bnxt_alloc_rx_data(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr,
+				     u16 prod, gfp_t gfp)
+{
+	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
+	u8 *data;
+	dma_addr_t mapping;
+
+	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+	if (!data)
+		return -ENOMEM;
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+
+	return 0;
+}
+
+static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
+			       u8 *data)
+{
+	u16 prod = rxr->rx_prod;
+	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *cons_bd, *prod_bd;
+
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+	prod_rx_buf->data = data;
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
+
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+
+	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
+}
+
+static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+	u16 next, max = rxr->rx_agg_bmap_size;
+
+	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
+	if (next >= max)
+		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
+	return next;
+}
+
+static inline int bnxt_alloc_rx_page(struct bnxt *bp,
+				     struct bnxt_rx_ring_info *rxr,
+				     u16 prod, gfp_t gfp)
+{
+	struct rx_bd *rxbd =
+		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
+	struct pci_dev *pdev = bp->pdev;
+	struct page *page;
+	dma_addr_t mapping;
+	u16 sw_prod = rxr->rx_sw_agg_prod;
+
+	page = alloc_page(gfp);
+	if (!page)
+		return -ENOMEM;
+
+	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&pdev->dev, mapping)) {
+		__free_page(page);
+		return -EIO;
+	}
+
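+	/* Skip over software producer slots that are still in use. */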
+	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+	__set_bit(sw_prod, rxr->rx_agg_bmap);
+	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
+	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
+
+	rx_agg_buf->page = page;
+	rx_agg_buf->mapping = mapping;
+	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+	rxbd->rx_bd_opaque = sw_prod;
+	return 0;
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
+				   u32 agg_bufs)
+{
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 prod = rxr->rx_agg_prod;
+	u16 sw_prod = rxr->rx_sw_agg_prod;
+	u32 i;
+
+	for (i = 0; i < agg_bufs; i++) {
+		u16 cons;
+		struct rx_agg_cmp *agg;
+		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
+		struct rx_bd *prod_bd;
+		struct page *page;
+
+		agg = (struct rx_agg_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		cons = agg->rx_agg_cmp_opaque;
+		__clear_bit(cons, rxr->rx_agg_bmap);
+
+		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
+			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
+
+		__set_bit(sw_prod, rxr->rx_agg_bmap);
+		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
+		cons_rx_buf = &rxr->rx_agg_ring[cons];
+
+		/* It is possible for sw_prod to be equal to cons, so
+		 * set cons_rx_buf->page to NULL first.
+		 */
+		page = cons_rx_buf->page;
+		cons_rx_buf->page = NULL;
+		prod_rx_buf->page = page;
+
+		prod_rx_buf->mapping = cons_rx_buf->mapping;
+
+		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
+		prod_bd->rx_bd_opaque = sw_prod;
+
+		prod = NEXT_RX_AGG(prod);
+		sw_prod = NEXT_RX_AGG(sw_prod);
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+	rxr->rx_agg_prod = prod;
+	rxr->rx_sw_agg_prod = sw_prod;
+}
+
+static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
+				   struct bnxt_rx_ring_info *rxr, u16 cons,
+				   u16 prod, u8 *data, dma_addr_t dma_addr,
+				   unsigned int len)
+{
+	int err;
+	struct sk_buff *skb;
+
+	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		return NULL;
+	}
+
+	skb = build_skb(data, 0);
+	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+	if (!skb) {
+		kfree(data);
+		return NULL;
+	}
+
+	skb_reserve(skb, BNXT_RX_OFFSET);
+	skb_put(skb, len);
+	return skb;
+}
+
+static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
+				     struct sk_buff *skb, u16 cp_cons,
+				     u32 agg_bufs)
+{
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 prod = rxr->rx_agg_prod;
+	u32 i;
+
+	for (i = 0; i < agg_bufs; i++) {
+		u16 cons, frag_len;
+		struct rx_agg_cmp *agg;
+		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
+		struct page *page;
+		dma_addr_t mapping;
+
+		agg = (struct rx_agg_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+		cons = agg->rx_agg_cmp_opaque;
+		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
+			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
+
+		cons_rx_buf = &rxr->rx_agg_ring[cons];
+		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+		__clear_bit(cons, rxr->rx_agg_bmap);
+
+		/* It is possible for bnxt_alloc_rx_page() to allocate
+		 * a sw_prod index that equals the cons index, so we
+		 * need to clear the cons entry now.
+		 */
+		mapping = dma_unmap_addr(cons_rx_buf, mapping);
+		page = cons_rx_buf->page;
+		cons_rx_buf->page = NULL;
+
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
+			struct skb_shared_info *shinfo;
+			unsigned int nr_frags;
+
+			shinfo = skb_shinfo(skb);
+			nr_frags = --shinfo->nr_frags;
+			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
+
+			dev_kfree_skb(skb);
+
+			cons_rx_buf->page = page;
+
+			/* Update prod since some pages may already have
+			 * been allocated.
+			 */
+			rxr->rx_agg_prod = prod;
+			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
+			return NULL;
+		}
+
+		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+
+		skb->data_len += frag_len;
+		skb->len += frag_len;
+		skb->truesize += PAGE_SIZE;
+
+		prod = NEXT_RX_AGG(prod);
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+	rxr->rx_agg_prod = prod;
+	return skb;
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+			       u8 agg_bufs, u32 *raw_cons)
+{
+	u16 last;
+	struct rx_agg_cmp *agg;
+
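+	/* Advance past the aggregation completions and verify that the
+	 * last one has been written back by the hardware.
+	 */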
+	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
+	last = RING_CMP(*raw_cons);
+	agg = (struct rx_agg_cmp *)
+		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
+	return RX_AGG_CMP_VALID(agg, *raw_cons);
+}
+
+static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+					    unsigned int len,
+					    dma_addr_t mapping)
+{
+	struct bnxt *bp = bnapi->bp;
+	struct pci_dev *pdev = bp->pdev;
+	struct sk_buff *skb;
+
+	skb = napi_alloc_skb(&bnapi->napi, len);
+	if (!skb)
+		return NULL;
+
+	dma_sync_single_for_cpu(&pdev->dev, mapping,
+				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+
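+	/* Copy the headroom along with the payload so the packet lands at
+	 * the same offset in the new skb.
+	 */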
+	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+
+	dma_sync_single_for_device(&pdev->dev, mapping,
+				   bp->rx_copy_thresh,
+				   PCI_DMA_FROMDEVICE);
+
+	skb_put(skb, len);
+	return skb;
+}
+
+static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+			   struct rx_tpa_start_cmp *tpa_start,
+			   struct rx_tpa_start_cmp_ext *tpa_start1)
+{
+	u8 agg_id = TPA_START_AGG_ID(tpa_start);
+	u16 cons, prod;
+	struct bnxt_tpa_info *tpa_info;
+	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *prod_bd;
+	dma_addr_t mapping;
+
+	cons = tpa_start->rx_tpa_start_cmp_opaque;
+	prod = rxr->rx_prod;
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+	tpa_info = &rxr->rx_tpa[agg_id];
+
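+	/* Swap the buffer on the RX ring with the one held in the TPA
+	 * slot, so the ring can keep producing while aggregation runs.
+	 */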
+	prod_rx_buf->data = tpa_info->data;
+
+	mapping = tpa_info->mapping;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
+
+	tpa_info->data = cons_rx_buf->data;
+	cons_rx_buf->data = NULL;
+	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+
+	tpa_info->len =
+		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
+				RX_TPA_START_CMP_LEN_SHIFT;
+	if (likely(TPA_START_HASH_VALID(tpa_start))) {
+		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
+
+		tpa_info->hash_type = PKT_HASH_TYPE_L4;
+		tpa_info->gso_type = SKB_GSO_TCPV4;
+		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+		if (hash_type == 3)
+			tpa_info->gso_type = SKB_GSO_TCPV6;
+		tpa_info->rss_hash =
+			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
+	} else {
+		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
+		tpa_info->gso_type = 0;
+		if (netif_msg_rx_err(bp))
+			netdev_warn(bp->dev, "TPA packet without valid hash\n");
+	}
+	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
+	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+
+	rxr->rx_prod = NEXT_RX(prod);
+	cons = NEXT_RX(cons);
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+
+	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
+	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
+	cons_rx_buf->data = NULL;
+}
+
+static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
+			   u16 cp_cons, u32 agg_bufs)
+{
+	if (agg_bufs)
+		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+}
+
+#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
+#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
+
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
+					   struct rx_tpa_end_cmp *tpa_end,
+					   struct rx_tpa_end_cmp_ext *tpa_end1,
+					   struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+	struct tcphdr *th;
+	int payload_off, tcp_opt_len = 0;
+	int len, nw_off;
+
+	NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
+	skb_shinfo(skb)->gso_size =
+		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
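+	/* The TCP timestamp option occupies 12 bytes of the TCP header. */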
+	if (TPA_END_GRO_TS(tpa_end))
+		tcp_opt_len = 12;
+
+	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
+		struct iphdr *iph;
+
+		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
+			 ETH_HLEN;
+		skb_set_network_header(skb, nw_off);
+		iph = ip_hdr(skb);
+		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
+		struct ipv6hdr *iph;
+
+		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
+			 ETH_HLEN;
+		skb_set_network_header(skb, nw_off);
+		iph = ipv6_hdr(skb);
+		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+		len = skb->len - skb_transport_offset(skb);
+		th = tcp_hdr(skb);
+		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+	} else {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	tcp_gro_complete(skb);
+
+	if (nw_off) { /* tunnel */
+		struct udphdr *uh = NULL;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = (struct iphdr *)skb->data;
+
+			if (iph->protocol == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		} else {
+			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+			if (iph->nexthdr == IPPROTO_UDP)
+				uh = (struct udphdr *)(iph + 1);
+		}
+		if (uh) {
+			if (uh->check)
+				skb_shinfo(skb)->gso_type |=
+					SKB_GSO_UDP_TUNNEL_CSUM;
+			else
+				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+		}
+	}
+#endif
+	return skb;
+}
+
+static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
+					   struct bnxt_napi *bnapi,
+					   u32 *raw_cons,
+					   struct rx_tpa_end_cmp *tpa_end,
+					   struct rx_tpa_end_cmp_ext *tpa_end1,
+					   bool *agg_event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	u8 agg_id = TPA_END_AGG_ID(tpa_end);
+	u8 *data, agg_bufs;
+	u16 cp_cons = RING_CMP(*raw_cons);
+	unsigned int len;
+	struct bnxt_tpa_info *tpa_info;
+	dma_addr_t mapping;
+	struct sk_buff *skb;
+
+	tpa_info = &rxr->rx_tpa[agg_id];
+	data = tpa_info->data;
+	prefetch(data);
+	len = tpa_info->len;
+	mapping = tpa_info->mapping;
+
+	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+			return ERR_PTR(-EBUSY);
+
+		*agg_event = true;
+		cp_cons = NEXT_CMP(cp_cons);
+	}
+
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+			    agg_bufs, (int)MAX_SKB_FRAGS);
+		return NULL;
+	}
+
+	if (len <= bp->rx_copy_thresh) {
+		skb = bnxt_copy_skb(bnapi, data, len, mapping);
+		if (!skb) {
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+	} else {
+		u8 *new_data;
+		dma_addr_t new_mapping;
+
+		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+		if (!new_data) {
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+
+		tpa_info->data = new_data;
+		tpa_info->mapping = new_mapping;
+
+		skb = build_skb(data, 0);
+		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
+				 PCI_DMA_FROMDEVICE);
+
+		if (!skb) {
+			kfree(data);
+			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
+			return NULL;
+		}
+		skb_reserve(skb, BNXT_RX_OFFSET);
+		skb_put(skb, len);
+	}
+
+	if (agg_bufs) {
+		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+		if (!skb) {
+			/* Page reuse already handled by bnxt_rx_pages(). */
+			return NULL;
+		}
+	}
+	skb->protocol = eth_type_trans(skb, bp->dev);
+
+	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
+		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
+
+	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
+		netdev_features_t features = skb->dev->features;
+		u16 vlan_proto = tpa_info->metadata >>
+			RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+		     vlan_proto == ETH_P_8021Q) ||
+		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+		     vlan_proto == ETH_P_8021AD)) {
+			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+					       tpa_info->metadata &
+					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+		}
+	}
+
+	skb_checksum_none_assert(skb);
+	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level =
+			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
+	}
+
+	if (TPA_END_GRO(tpa_end))
+		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+
+	return skb;
+}
+
+/* returns the following:
+ * 1       - 1 packet successfully received
+ * 0       - successful TPA_START, packet not completed yet
+ * -EBUSY  - completion ring does not have all the agg buffers yet
+ * -ENOMEM - packet aborted due to out of memory
+ * -EIO    - packet aborted due to hw error indicated in BD
+ */
+static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
+		       bool *agg_event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+	struct net_device *dev = bp->dev;
+	struct rx_cmp *rxcmp;
+	struct rx_cmp_ext *rxcmp1;
+	u32 tmp_raw_cons = *raw_cons;
+	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+	struct bnxt_sw_rx_bd *rx_buf;
+	unsigned int len;
+	u8 *data, agg_bufs, cmp_type;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	int rc = 0;
+
+	rxcmp = (struct rx_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
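+	/* An L2 RX completion spans two ring entries; the second entry
+	 * must be valid before the completion can be processed.
+	 */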
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+
+	prod = rxr->rx_prod;
+
+	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
+		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
+			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
+
+		goto next_rx_no_prod;
+
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
+				   (struct rx_tpa_end_cmp *)rxcmp,
+				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
+				   agg_event);
+
+		if (unlikely(IS_ERR(skb)))
+			return -EBUSY;
+
+		rc = -ENOMEM;
+		if (likely(skb)) {
+			skb_record_rx_queue(skb, bnapi->index);
+			skb_mark_napi_id(skb, &bnapi->napi);
+			if (bnxt_busy_polling(bnapi))
+				netif_receive_skb(skb);
+			else
+				napi_gro_receive(&bnapi->napi, skb);
+			rc = 1;
+		}
+		goto next_rx_no_prod;
+	}
+
+	cons = rxcmp->rx_cmp_opaque;
+	rx_buf = &rxr->rx_buf_ring[cons];
+	data = rx_buf->data;
+	prefetch(data);
+
+	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
+				RX_CMP_AGG_BUFS_SHIFT;
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+			return -EBUSY;
+
+		cp_cons = NEXT_CMP(cp_cons);
+		*agg_event = true;
+	}
+
+	rx_buf->data = NULL;
+	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		if (agg_bufs)
+			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
+
+		rc = -EIO;
+		goto next_rx;
+	}
+
+	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+	dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+	if (len <= bp->rx_copy_thresh) {
+		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+		bnxt_reuse_rx_data(rxr, cons, data);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	} else {
+		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	}
+
+	if (agg_bufs) {
+		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto next_rx;
+		}
+	}
+
+	if (RX_CMP_HASH_VALID(rxcmp)) {
+		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
+
+		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
+		if (hash_type != 1 && hash_type != 3)
+			type = PKT_HASH_TYPE_L3;
+		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
+	}
+
+	skb->protocol = eth_type_trans(skb, dev);
+
+	if (rxcmp1->rx_cmp_flags2 &
+	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
+		netdev_features_t features = skb->dev->features;
+		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
+		     vlan_proto == ETH_P_8021Q) ||
+		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
+		     vlan_proto == ETH_P_8021AD))
+			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
+					       meta_data &
+					       RX_CMP_FLAGS2_METADATA_VID_MASK);
+	}
+
+	skb_checksum_none_assert(skb);
+	if (RX_CMP_L4_CS_OK(rxcmp1)) {
+		if (dev->features & NETIF_F_RXCSUM) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+		}
+	} else {
+		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
+			cpr->rx_l4_csum_errors++;
+	}
+
+	skb_record_rx_queue(skb, bnapi->index);
+	skb_mark_napi_id(skb, &bnapi->napi);
+	if (bnxt_busy_polling(bnapi))
+		netif_receive_skb(skb);
+	else
+		napi_gro_receive(&bnapi->napi, skb);
+	rc = 1;
+
+next_rx:
+	rxr->rx_prod = NEXT_RX(prod);
+
+next_rx_no_prod:
+	*raw_cons = tmp_raw_cons;
+
+	return rc;
+}
+
+static int bnxt_async_event_process(struct bnxt *bp,
+				    struct hwrm_async_event_cmpl *cmpl)
+{
+	u16 event_id = le16_to_cpu(cmpl->event_id);
+
+	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
+	switch (event_id) {
+	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+		break;
+	default:
+		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
+			   event_id);
+		break;
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
+{
+	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
+	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
+	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
+				(struct hwrm_fwd_req_cmpl *)txcmp;
+
+	switch (cmpl_type) {
+	case CMPL_BASE_TYPE_HWRM_DONE:
+		seq_id = le16_to_cpu(h_cmpl->sequence_id);
+		if (seq_id == bp->hwrm_intr_seq_id)
+			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+		else
+			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
+		break;
+
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
+		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
+
+		if ((vf_id < bp->pf.first_vf_id) ||
+		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
+			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
+				   vf_id);
+			return -EINVAL;
+		}
+
+		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
+		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+		break;
+
+	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+		bnxt_async_event_process(bp,
+					 (struct hwrm_async_event_cmpl *)txcmp);
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static irqreturn_t bnxt_msix(int irq, void *dev_instance)
+{
+	struct bnxt_napi *bnapi = dev_instance;
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 cons = RING_CMP(cpr->cp_raw_cons);
+
+	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+	napi_schedule(&bnapi->napi);
+	return IRQ_HANDLED;
+}
+
+static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+{
+	u32 raw_cons = cpr->cp_raw_cons;
+	u16 cons = RING_CMP(raw_cons);
+	struct tx_cmp *txcmp;
+
+	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+	return TX_CMP_VALID(txcmp, raw_cons);
+}
+
+#define CAG_LEGACY_INT_STATUS	0x2014
+
+static irqreturn_t bnxt_inta(int irq, void *dev_instance)
+{
+	struct bnxt_napi *bnapi = dev_instance;
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 cons = RING_CMP(cpr->cp_raw_cons);
+	u32 int_status;
+
+	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
+
+	if (!bnxt_has_work(bp, cpr)) {
+		int_status = readl(bp->bar0 + CAG_LEGACY_INT_STATUS);
+		/* The interrupt is not ours if the status bit is clear. */
+		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
+			return IRQ_NONE;
+	}
+
+	/* disable ring IRQ */
+	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
+
+	/* Return here if the interrupt is shared and has been disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+	return IRQ_HANDLED;
+}
+
+static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 raw_cons = cpr->cp_raw_cons;
+	u32 cons;
+	int tx_pkts = 0;
+	int rx_pkts = 0;
+	bool rx_event = false;
+	bool agg_event = false;
+	struct tx_cmp *txcmp;
+
+	while (1) {
+		int rc;
+
+		cons = RING_CMP(raw_cons);
+		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+		if (!TX_CMP_VALID(txcmp, raw_cons))
+			break;
+
+		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
+			tx_pkts++;
+			/* return full budget so NAPI will complete. */
+			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+				rx_pkts = budget;
+		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+			if (likely(rc >= 0))
+				rx_pkts += rc;
+			else if (rc == -EBUSY)	/* partial completion */
+				break;
+			rx_event = true;
+		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_DONE) ||
+				    (TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
+				    (TX_CMP_TYPE(txcmp) ==
+				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
+			bnxt_hwrm_handler(bp, txcmp);
+		}
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+
+		if (rx_pkts == budget)
+			break;
+	}
+
+	cpr->cp_raw_cons = raw_cons;
+	/* ACK completion ring before freeing tx ring and producing new
+	 * buffers in rx/agg rings to prevent overflowing the completion
+	 * ring.
+	 */
+	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+	if (tx_pkts)
+		bnxt_tx_int(bp, bnapi, tx_pkts);
+
+	if (rx_event) {
+		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+
+		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+		if (agg_event) {
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+		}
+	}
+	return rx_pkts;
+}
+
+static int bnxt_poll(struct napi_struct *napi, int budget)
+{
+	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	int work_done = 0;
+
+	if (!bnxt_lock_napi(bnapi))
+		return budget;
+
+	while (1) {
+		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
+
+		if (work_done >= budget)
+			break;
+
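+		/* No more work: complete NAPI and re-arm the interrupt. */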
+		if (!bnxt_has_work(bp, cpr)) {
+			napi_complete(napi);
+			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+			break;
+		}
+	}
+	mmiowb();
+	bnxt_unlock_napi(bnapi);
+	return work_done;
+}
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int bnxt_busy_poll(struct napi_struct *napi)
+{
+	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+	struct bnxt *bp = bnapi->bp;
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	int rx_work, budget = 4;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		return LL_FLUSH_FAILED;
+
+	if (!bnxt_lock_poll(bnapi))
+		return LL_FLUSH_BUSY;
+
+	rx_work = bnxt_poll_work(bp, bnapi, budget);
+
+	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+
+	bnxt_unlock_poll(bnapi);
+	return rx_work;
+}
+#endif
+
+static void bnxt_free_tx_skbs(struct bnxt *bp)
+{
+	int i, max_idx;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+		for (j = 0; j < max_idx;) {
+			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
+
+			if (!skb) {
+				j++;
+				continue;
+			}
+
+			tx_buf->skb = NULL;
+
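+			/* Push packets use two BDs and have no DMA mapping. */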
+			if (tx_buf->is_push) {
+				dev_kfree_skb(skb);
+				j += 2;
+				continue;
+			}
+
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
+
+			last = tx_buf->nr_frags;
+			j += 2;
+			for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+				tx_buf = &txr->tx_buf_ring[j];
+				dma_unmap_page(
+					&pdev->dev,
+					dma_unmap_addr(tx_buf, mapping),
+					skb_frag_size(frag), PCI_DMA_TODEVICE);
+			}
+			dev_kfree_skb(skb);
+		}
+		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
+	}
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+	int i, max_idx, max_agg_idx;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
+	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+
+		if (rxr->rx_tpa) {
+			for (j = 0; j < MAX_TPA; j++) {
+				struct bnxt_tpa_info *tpa_info =
+							&rxr->rx_tpa[j];
+				u8 *data = tpa_info->data;
+
+				if (!data)
+					continue;
+
+				dma_unmap_single(
+					&pdev->dev,
+					dma_unmap_addr(tpa_info, mapping),
+					bp->rx_buf_use_size,
+					PCI_DMA_FROMDEVICE);
+
+				tpa_info->data = NULL;
+
+				kfree(data);
+			}
+		}
+
+		for (j = 0; j < max_idx; j++) {
+			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+			u8 *data = rx_buf->data;
+
+			if (!data)
+				continue;
+
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
+
+			rx_buf->data = NULL;
+
+			kfree(data);
+		}
+
+		for (j = 0; j < max_agg_idx; j++) {
+			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
+				&rxr->rx_agg_ring[j];
+			struct page *page = rx_agg_buf->page;
+
+			if (!page)
+				continue;
+
+			dma_unmap_page(&pdev->dev,
+				       dma_unmap_addr(rx_agg_buf, mapping),
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			rx_agg_buf->page = NULL;
+			__clear_bit(j, rxr->rx_agg_bmap);
+
+			__free_page(page);
+		}
+	}
+}
+
+static void bnxt_free_skbs(struct bnxt *bp)
+{
+	bnxt_free_tx_skbs(bp);
+	bnxt_free_rx_skbs(bp);
+}
+
+static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+
+	for (i = 0; i < ring->nr_pages; i++) {
+		if (!ring->pg_arr[i])
+			continue;
+
+		dma_free_coherent(&pdev->dev, ring->page_size,
+				  ring->pg_arr[i], ring->dma_arr[i]);
+
+		ring->pg_arr[i] = NULL;
+	}
+	if (ring->pg_tbl) {
+		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
+				  ring->pg_tbl, ring->pg_tbl_map);
+		ring->pg_tbl = NULL;
+	}
+	if (ring->vmem_size && *ring->vmem) {
+		vfree(*ring->vmem);
+		*ring->vmem = NULL;
+	}
+}
+
+static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
+{
+	int i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (ring->nr_pages > 1) {
+		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
+						  ring->nr_pages * 8,
+						  &ring->pg_tbl_map,
+						  GFP_KERNEL);
+		if (!ring->pg_tbl)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < ring->nr_pages; i++) {
+		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+						     ring->page_size,
+						     &ring->dma_arr[i],
+						     GFP_KERNEL);
+		if (!ring->pg_arr[i])
+			return -ENOMEM;
+
+		if (ring->nr_pages > 1)
+			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
+	}
+
+	if (ring->vmem_size) {
+		*ring->vmem = vzalloc(ring->vmem_size);
+		if (!(*ring->vmem))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void bnxt_free_rx_rings(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+
+		kfree(rxr->rx_tpa);
+		rxr->rx_tpa = NULL;
+
+		kfree(rxr->rx_agg_bmap);
+		rxr->rx_agg_bmap = NULL;
+
+		ring = &rxr->rx_ring_struct;
+		bnxt_free_ring(bp, ring);
+
+		ring = &rxr->rx_agg_ring_struct;
+		bnxt_free_ring(bp, ring);
+	}
+}
+
+static int bnxt_alloc_rx_rings(struct bnxt *bp)
+{
+	int i, rc, agg_rings = 0, tpa_rings = 0;
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		agg_rings = 1;
+
+	if (bp->flags & BNXT_FLAG_TPA)
+		tpa_rings = 1;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		rxr = &bnapi->rx_ring;
+		ring = &rxr->rx_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+
+		if (agg_rings) {
+			u16 mem_size;
+
+			ring = &rxr->rx_agg_ring_struct;
+			rc = bnxt_alloc_ring(bp, ring);
+			if (rc)
+				return rc;
+
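+			/* One bit per aggregation ring entry. */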
+			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+			mem_size = rxr->rx_agg_bmap_size / 8;
+			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+			if (!rxr->rx_agg_bmap)
+				return -ENOMEM;
+
+			if (tpa_rings) {
+				rxr->rx_tpa = kcalloc(MAX_TPA,
+						sizeof(struct bnxt_tpa_info),
+						GFP_KERNEL);
+				if (!rxr->rx_tpa)
+					return -ENOMEM;
+			}
+		}
+	}
+	return 0;
+}
+
+static void bnxt_free_tx_rings(struct bnxt *bp)
+{
+	int i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+
+		if (txr->tx_push) {
+			dma_free_coherent(&pdev->dev, bp->tx_push_size,
+					  txr->tx_push, txr->tx_push_mapping);
+			txr->tx_push = NULL;
+		}
+
+		ring = &txr->tx_ring_struct;
+
+		bnxt_free_ring(bp, ring);
+	}
+}
+
+static int bnxt_alloc_tx_rings(struct bnxt *bp)
+{
+	int i, j, rc;
+	struct pci_dev *pdev = bp->pdev;
+
+	bp->tx_push_size = 0;
+	if (bp->tx_push_thresh) {
+		int push_size;
+
+		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
+					bp->tx_push_thresh);
+
+		if (push_size > 128) {
+			push_size = 0;
+			bp->tx_push_thresh = 0;
+		}
+
+		bp->tx_push_size = push_size;
+	}
+
+	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		txr = &bnapi->tx_ring;
+		ring = &txr->tx_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+
+		if (bp->tx_push_size) {
+			struct tx_bd *txbd;
+			dma_addr_t mapping;
+
+			/* One pre-allocated DMA buffer to back up the
+			 * TX push operation.
+			 */
+			txr->tx_push = dma_alloc_coherent(&pdev->dev,
+						bp->tx_push_size,
+						&txr->tx_push_mapping,
+						GFP_KERNEL);
+
+			if (!txr->tx_push)
+				return -ENOMEM;
+
+			txbd = &txr->tx_push->txbd1;
+
+			mapping = txr->tx_push_mapping +
+				sizeof(struct tx_push_bd);
+			txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+			memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+		}
+		ring->queue_id = bp->q_info[j].queue_id;
+		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+			j++;
+	}
+	return 0;
+}
+
+static void bnxt_free_cp_rings(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+
+		bnxt_free_ring(bp, ring);
+	}
+}
+
+static int bnxt_alloc_cp_rings(struct bnxt *bp)
+{
+	int i, rc;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+
+		rc = bnxt_alloc_ring(bp, ring);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static void bnxt_init_ring_struct(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_tx_ring_info *txr;
+		struct bnxt_ring_struct *ring;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		ring = &cpr->cp_ring_struct;
+		ring->nr_pages = bp->cp_nr_pages;
+		ring->page_size = HW_CMPD_RING_SIZE;
+		ring->pg_arr = (void **)cpr->cp_desc_ring;
+		ring->dma_arr = cpr->cp_desc_mapping;
+		ring->vmem_size = 0;
+
+		rxr = &bnapi->rx_ring;
+		ring = &rxr->rx_ring_struct;
+		ring->nr_pages = bp->rx_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)rxr->rx_desc_ring;
+		ring->dma_arr = rxr->rx_desc_mapping;
+		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+		ring->vmem = (void **)&rxr->rx_buf_ring;
+
+		ring = &rxr->rx_agg_ring_struct;
+		ring->nr_pages = bp->rx_agg_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
+		ring->dma_arr = rxr->rx_agg_desc_mapping;
+		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+		ring->vmem = (void **)&rxr->rx_agg_ring;
+
+		txr = &bnapi->tx_ring;
+		ring = &txr->tx_ring_struct;
+		ring->nr_pages = bp->tx_nr_pages;
+		ring->page_size = HW_RXBD_RING_SIZE;
+		ring->pg_arr = (void **)txr->tx_desc_ring;
+		ring->dma_arr = txr->tx_desc_mapping;
+		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
+		ring->vmem = (void **)&txr->tx_buf_ring;
+	}
+}
+
+static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
+{
+	int i;
+	u32 prod;
+	struct rx_bd **rx_buf_ring;
+
+	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
+	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
+		int j;
+		struct rx_bd *rxbd;
+
+		rxbd = rx_buf_ring[i];
+		if (!rxbd)
+			continue;
+
+		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
+			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
+			rxbd->rx_bd_opaque = prod;
+		}
+	}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_ring_struct *ring;
+	u32 prod, type;
+	int i;
+
+	if (!bnapi)
+		return -EINVAL;
+
+	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+	if (NET_IP_ALIGN == 2)
+		type |= RX_BD_FLAGS_SOP;
+
+	rxr = &bnapi->rx_ring;
+	ring = &rxr->rx_ring_struct;
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_prod;
+	for (i = 0; i < bp->rx_ring_size; i++) {
+		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_nr, i, bp->rx_ring_size);
+			break;
+		}
+		prod = NEXT_RX(prod);
+	}
+	rxr->rx_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+		return 0;
+
+	ring = &rxr->rx_agg_ring_struct;
+
+	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+	bnxt_init_rxbd_pages(ring, type);
+
+	prod = rxr->rx_agg_prod;
+	for (i = 0; i < bp->rx_agg_ring_size; i++) {
+		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+				    ring_nr, i, bp->rx_agg_ring_size);
+			break;
+		}
+		prod = NEXT_RX_AGG(prod);
+	}
+	rxr->rx_agg_prod = prod;
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+
+	if (bp->flags & BNXT_FLAG_TPA) {
+		if (rxr->rx_tpa) {
+			u8 *data;
+			dma_addr_t mapping;
+
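+			/* Pre-allocate one receive buffer for each TPA
+			 * aggregation slot.
+			 */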
+			for (i = 0; i < MAX_TPA; i++) {
+				data = __bnxt_alloc_rx_data(bp, &mapping,
+							    GFP_KERNEL);
+				if (!data)
+					return -ENOMEM;
+
+				rxr->rx_tpa[i].data = data;
+				rxr->rx_tpa[i].mapping = mapping;
+			}
+		} else {
+			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static int bnxt_init_rx_rings(struct bnxt *bp)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		rc = bnxt_init_one_rx_ring(bp, i);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int bnxt_init_tx_rings(struct bnxt *bp)
+{
+	u16 i;
+
+	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
+				   MAX_SKB_FRAGS + 1);
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+
+		ring->fw_ring_id = INVALID_HW_RING_ID;
+	}
+
+	return 0;
+}
+
+static void bnxt_free_ring_grps(struct bnxt *bp)
+{
+	kfree(bp->grp_info);
+	bp->grp_info = NULL;
+}
+
+static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
+{
+	int i;
+
+	if (irq_re_init) {
+		bp->grp_info = kcalloc(bp->cp_nr_rings,
+				       sizeof(struct bnxt_ring_grp_info),
+				       GFP_KERNEL);
+		if (!bp->grp_info)
+			return -ENOMEM;
+	}
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		if (irq_re_init)
+			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
+		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
+		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+	}
+	return 0;
+}
+
+static void bnxt_free_vnics(struct bnxt *bp)
+{
+	kfree(bp->vnic_info);
+	bp->vnic_info = NULL;
+	bp->nr_vnics = 0;
+}
+
+static int bnxt_alloc_vnics(struct bnxt *bp)
+{
+	int num_vnics = 1;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (bp->flags & BNXT_FLAG_RFS)
+		num_vnics += bp->rx_nr_rings;
+#endif
+
+	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
+				GFP_KERNEL);
+	if (!bp->vnic_info)
+		return -ENOMEM;
+
+	bp->nr_vnics = num_vnics;
+	return 0;
+}
+
+static void bnxt_init_vnics(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		vnic->fw_vnic_id = INVALID_HW_RING_ID;
+		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
+
+		if (vnic->rss_hash_key) {
+			if (i == 0)
+				prandom_bytes(vnic->rss_hash_key,
+					      HW_HASH_KEY_SIZE);
+			else
+				memcpy(vnic->rss_hash_key,
+				       bp->vnic_info[0].rss_hash_key,
+				       HW_HASH_KEY_SIZE);
+		}
+	}
+}
+
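+/* Return the number of descriptor pages needed for a ring of ring_size
+ * entries, rounded up to a power of two.  Ring sizes are normally one
+ * less than a power of two, so the increment covers the partial page.
+ */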
+static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
+{
+	int pages;
+
+	pages = ring_size / desc_per_pg;
+
+	if (!pages)
+		return 1;
+
+	pages++;
+
+	while (pages & (pages - 1))
+		pages++;
+
+	return pages;
+}
+
+static void bnxt_set_tpa_flags(struct bnxt *bp)
+{
+	bp->flags &= ~BNXT_FLAG_TPA;
+	if (bp->dev->features & NETIF_F_LRO)
+		bp->flags |= BNXT_FLAG_LRO;
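+	/* Hardware GRO is not enabled on revision 0 chips. */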
+	if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+		bp->flags |= BNXT_FLAG_GRO;
+}
+
+/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
+ * be set on entry.
+ */
+void bnxt_set_ring_params(struct bnxt *bp)
+{
+	u32 ring_size, rx_size, rx_space;
+	u32 agg_factor = 0, agg_ring_size = 0;
+
+	/* 8 for CRC and VLAN */
+	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
+
+	rx_space = rx_size + NET_SKB_PAD +
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
+	ring_size = bp->rx_ring_size;
+	bp->rx_agg_ring_size = 0;
+	bp->rx_agg_nr_pages = 0;
+
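+	/* With TPA, provision 4 aggregation ring entries per RX ring
+	 * entry.
+	 */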
+	if (bp->flags & BNXT_FLAG_TPA)
+		agg_factor = 4;
+
+	bp->flags &= ~BNXT_FLAG_JUMBO;
+	if (rx_space > PAGE_SIZE) {
+		u32 jumbo_factor;
+
+		bp->flags |= BNXT_FLAG_JUMBO;
+		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+		if (jumbo_factor > agg_factor)
+			agg_factor = jumbo_factor;
+	}
+	agg_ring_size = ring_size * agg_factor;
+
+	if (agg_ring_size) {
+		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
+							RX_DESC_CNT);
+		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
+			u32 tmp = agg_ring_size;
+
+			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
+			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
+			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
+				    tmp, agg_ring_size);
+		}
+		bp->rx_agg_ring_size = agg_ring_size;
+		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
+		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+		rx_space = rx_size + NET_SKB_PAD +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	}
+
+	bp->rx_buf_use_size = rx_size;
+	bp->rx_buf_size = rx_space;
+
+	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
+	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
+
+	ring_size = bp->tx_ring_size;
+	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
+	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
+
+	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+	bp->cp_ring_size = ring_size;
+
+	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
+	if (bp->cp_nr_pages > MAX_CP_PAGES) {
+		bp->cp_nr_pages = MAX_CP_PAGES;
+		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
+		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
+			    ring_size, bp->cp_ring_size);
+	}
+	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
+	bp->cp_ring_mask = bp->cp_bit - 1;
+}
+
+static void bnxt_free_vnic_attributes(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_vnic_info *vnic;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->vnic_info)
+		return;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+
+		kfree(vnic->fw_grp_ids);
+		vnic->fw_grp_ids = NULL;
+
+		kfree(vnic->uc_list);
+		vnic->uc_list = NULL;
+
+		if (vnic->mc_list) {
+			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
+					  vnic->mc_list, vnic->mc_list_mapping);
+			vnic->mc_list = NULL;
+		}
+
+		if (vnic->rss_table) {
+			dma_free_coherent(&pdev->dev, PAGE_SIZE,
+					  vnic->rss_table,
+					  vnic->rss_table_dma_addr);
+			vnic->rss_table = NULL;
+		}
+
+		vnic->rss_hash_key = NULL;
+		vnic->flags = 0;
+	}
+}
+
+static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
+{
+	int i, rc = 0, size;
+	struct bnxt_vnic_info *vnic;
+	struct pci_dev *pdev = bp->pdev;
+	int max_rings;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		vnic = &bp->vnic_info[i];
+
+		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
+			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
+
+			if (mem_size > 0) {
+				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
+				if (!vnic->uc_list) {
+					rc = -ENOMEM;
+					goto out;
+				}
+			}
+		}
+
+		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
+			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
+			vnic->mc_list =
+				dma_alloc_coherent(&pdev->dev,
+						   vnic->mc_list_size,
+						   &vnic->mc_list_mapping,
+						   GFP_KERNEL);
+			if (!vnic->mc_list) {
+				rc = -ENOMEM;
+				goto out;
+			}
+		}
+
+		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+			max_rings = bp->rx_nr_rings;
+		else
+			max_rings = 1;
+
+		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
+		if (!vnic->fw_grp_ids) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		/* Allocate rss table and hash key */
+		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+						     &vnic->rss_table_dma_addr,
+						     GFP_KERNEL);
+		if (!vnic->rss_table) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+
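+		/* The hash key shares the RSS table's DMA page and is placed
+		 * at a cache-aligned offset after the indirection table.
+		 */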
+		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+	}
+	return 0;
+
+out:
+	return rc;
+}
+
+static void bnxt_free_hwrm_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+			  bp->hwrm_cmd_resp_dma_addr);
+
+	bp->hwrm_cmd_resp_addr = NULL;
+	if (bp->hwrm_dbg_resp_addr) {
+		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
+				  bp->hwrm_dbg_resp_addr,
+				  bp->hwrm_dbg_resp_dma_addr);
+
+		bp->hwrm_dbg_resp_addr = NULL;
+	}
+}
+
+static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+
+	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+						   &bp->hwrm_cmd_resp_dma_addr,
+						   GFP_KERNEL);
+	if (!bp->hwrm_cmd_resp_addr)
+		return -ENOMEM;
+	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
+						    HWRM_DBG_REG_BUF_SIZE,
+						    &bp->hwrm_dbg_resp_dma_addr,
+						    GFP_KERNEL);
+	if (!bp->hwrm_dbg_resp_addr)
+		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
+
+	return 0;
+}
+
+static void bnxt_free_stats(struct bnxt *bp)
+{
+	u32 size, i;
+	struct pci_dev *pdev = bp->pdev;
+
+	if (!bp->bnapi)
+		return;
+
+	size = sizeof(struct ctx_hw_stats);
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		if (cpr->hw_stats) {
+			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
+					  cpr->hw_stats_map);
+			cpr->hw_stats = NULL;
+		}
+	}
+}
+
+static int bnxt_alloc_stats(struct bnxt *bp)
+{
+	u32 size, i;
+	struct pci_dev *pdev = bp->pdev;
+
+	size = sizeof(struct ctx_hw_stats);
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
+						   &cpr->hw_stats_map,
+						   GFP_KERNEL);
+		if (!cpr->hw_stats)
+			return -ENOMEM;
+
+		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+	}
+	return 0;
+}
+
+static void bnxt_clear_ring_indices(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_tx_ring_info *txr;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		cpr->cp_raw_cons = 0;
+
+		txr = &bnapi->tx_ring;
+		txr->tx_prod = 0;
+		txr->tx_cons = 0;
+
+		rxr = &bnapi->rx_ring;
+		rxr->rx_prod = 0;
+		rxr->rx_agg_prod = 0;
+		rxr->rx_sw_agg_prod = 0;
+	}
+}
+
+static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i;
+
+	/* We are under rtnl_lock and all our NAPIs have been disabled,
+	 * so it is safe to delete the hash table.
+	 */
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_ntuple_filter *fltr;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+			hlist_del(&fltr->hash);
+			kfree(fltr);
+		}
+	}
+	if (irq_reinit) {
+		kfree(bp->ntp_fltr_bmap);
+		bp->ntp_fltr_bmap = NULL;
+	}
+	bp->ntp_fltr_count = 0;
+#endif
+}
+
+static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
+	if (!(bp->flags & BNXT_FLAG_RFS))
+		return 0;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
+
+	bp->ntp_fltr_count = 0;
+	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+				    sizeof(long), GFP_KERNEL);
+
+	if (!bp->ntp_fltr_bmap)
+		rc = -ENOMEM;
+
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_free_vnic_attributes(bp);
+	bnxt_free_tx_rings(bp);
+	bnxt_free_rx_rings(bp);
+	bnxt_free_cp_rings(bp);
+	bnxt_free_ntp_fltrs(bp, irq_re_init);
+	if (irq_re_init) {
+		bnxt_free_stats(bp);
+		bnxt_free_ring_grps(bp);
+		bnxt_free_vnics(bp);
+		kfree(bp->bnapi);
+		bp->bnapi = NULL;
+	} else {
+		bnxt_clear_ring_indices(bp);
+	}
+}
+
+static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
+{
+	int i, rc, size, arr_size;
+	void *bnapi;
+
+	if (irq_re_init) {
+		/* Allocate bnapi mem pointer array and mem block for
+		 * all queues
+		 */
+		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
+				bp->cp_nr_rings);
+		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
+		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
+		if (!bnapi)
+			return -ENOMEM;
+
+		bp->bnapi = bnapi;
+		bnapi += arr_size;
+		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
+			bp->bnapi[i] = bnapi;
+			bp->bnapi[i]->index = i;
+			bp->bnapi[i]->bp = bp;
+		}
+
+		rc = bnxt_alloc_stats(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_ntp_fltrs(bp);
+		if (rc)
+			goto alloc_mem_err;
+
+		rc = bnxt_alloc_vnics(bp);
+		if (rc)
+			goto alloc_mem_err;
+	}
+
+	bnxt_init_ring_struct(bp);
+
+	rc = bnxt_alloc_rx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_tx_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	rc = bnxt_alloc_cp_rings(bp);
+	if (rc)
+		goto alloc_mem_err;
+
+	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
+				  BNXT_VNIC_UCAST_FLAG;
+	rc = bnxt_alloc_vnic_attributes(bp);
+	if (rc)
+		goto alloc_mem_err;
+	return 0;
+
+alloc_mem_err:
+	bnxt_free_mem(bp, true);
+	return rc;
+}
+
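+/* Fill in the common HWRM request header.  The sequence ID is assigned
+ * later, in _hwrm_send_message().
+ */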
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
+			    u16 cmpl_ring, u16 target_id)
+{
+	struct hwrm_cmd_req_hdr *req = request;
+
+	req->cmpl_ring_req_type =
+		cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
+	req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
+	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+}
+
+int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+	int i, intr_process, rc;
+	struct hwrm_cmd_req_hdr *req = msg;
+	u32 *data = msg;
+	__le32 *resp_len, *valid;
+	u16 cp_ring_id, len = 0;
+	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+
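+	/* Tag the request with the next sequence number so the response
+	 * can be matched to it.
+	 */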
+	req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
+	memset(resp, 0, PAGE_SIZE);
+	cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
+		      HWRM_CMPL_RING_MASK) >>
+		     HWRM_CMPL_RING_SFT;
+	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+	/* Write request msg to hwrm channel */
+	__iowrite32_copy(bp->bar0, data, msg_len / 4);
+
+	/* currently supports only one outstanding message */
+	if (intr_process)
+		bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
+				       HWRM_SEQ_ID_MASK;
+
+	/* Ring channel doorbell */
+	writel(1, bp->bar0 + 0x100);
+
+	i = 0;
+	if (intr_process) {
+		/* Wait until hwrm response cmpl interrupt is processed */
+		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+		       i++ < timeout) {
+			usleep_range(600, 800);
+		}
+
+		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+				   req->cmpl_ring_req_type);
+			return -1;
+		}
+	} else {
+		/* Check if response len is updated */
+		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+		for (i = 0; i < timeout; i++) {
+			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+			      HWRM_RESP_LEN_SFT;
+			if (len)
+				break;
+			usleep_range(600, 800);
+		}
+
+		if (i >= timeout) {
+			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+				   timeout, req->cmpl_ring_req_type,
+				   req->target_id_seq_id, *resp_len);
+			return -1;
+		}
+
+		/* Last word of resp contains valid bit */
+		valid = bp->hwrm_cmd_resp_addr + len - 4;
+		for (i = 0; i < timeout; i++) {
+			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+				break;
+			usleep_range(600, 800);
+		}
+
+		if (i >= timeout) {
+			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+				   timeout, req->cmpl_ring_req_type,
+				   req->target_id_seq_id, len, *valid);
+			return -1;
+		}
+	}
+
+	rc = le16_to_cpu(resp->error_code);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+			   le16_to_cpu(resp->req_type),
+			   le16_to_cpu(resp->seq_id), rc);
+		return rc;
+	}
+	return 0;
+}
+
+int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
+{
+	int rc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_drv_rgtr_input req = {0};
+	int i;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+	req.enables =
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
+			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
+	/* TODO: the async event forwarding bits are not yet defined; the
+	 * firmware only checks for a non-zero value to enable async event
+	 * forwarding
+	 */
+	req.async_event_fwd[0] |= cpu_to_le32(1);
+	req.os_type = cpu_to_le16(1);
+	req.ver_maj = DRV_VER_MAJ;
+	req.ver_min = DRV_VER_MIN;
+	req.ver_upd = DRV_VER_UPD;
+
+	if (BNXT_PF(bp)) {
+		unsigned long vf_req_snif_bmap[4];
+		u32 *data = (u32 *)vf_req_snif_bmap;
+
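+		/* Build a 256-bit bitmap of the VF request types that the
+		 * firmware should forward to the PF driver.
+		 */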
+		memset(vf_req_snif_bmap, 0, 32);
+		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
+			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
+
+		for (i = 0; i < 8; i++) {
+			req.vf_req_fwd[i] = cpu_to_le32(*data);
+			data++;
+		}
+		req.enables |=
+			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
+	}
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
+{
+	u32 rc = 0;
+	struct hwrm_tunnel_dst_port_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
+	req.tunnel_type = tunnel_type;
+
+	switch (tunnel_type) {
+	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
+		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+		break;
+	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
+		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+		break;
+	default:
+		break;
+	}
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
+			   rc);
+	return rc;
+}
+
+static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
+					   u8 tunnel_type)
+{
+	u32 rc = 0;
+	struct hwrm_tunnel_dst_port_alloc_input req = {0};
+	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
+
+	req.tunnel_type = tunnel_type;
+	req.tunnel_dst_port_val = port;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
+			   rc);
+		goto err_out;
+	}
+
+	if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
+		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+
+	else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
+		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+err_out:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
+{
+	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
+	req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+	req.mask = cpu_to_le32(vnic->rx_mask);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
+					    struct bnxt_ntuple_filter *fltr)
+{
+	struct hwrm_cfa_ntuple_filter_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
+	req.ntuple_filter_id = fltr->filter_id;
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+#define BNXT_NTP_FLTR_FLAGS					\
+	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
+	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
+
+static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
+					     struct bnxt_ntuple_filter *fltr)
+{
+	int rc = 0;
+	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
+		bp->hwrm_cmd_resp_addr;
+	struct flow_keys *keys = &fltr->fkeys;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
+	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+
+	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+
+	req.ethertype = htons(ETH_P_IP);
+	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
+	req.ipaddr_type = 4;
+	req.ip_protocol = keys->basic.ip_proto;
+
+	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+
+	req.src_port = keys->ports.src;
+	req.src_port_mask = cpu_to_be16(0xffff);
+	req.dst_port = keys->ports.dst;
+	req.dst_port_mask = cpu_to_be16(0xffff);
+
+	req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		fltr->filter_id = resp->ntuple_filter_id;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+#endif
+
+static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
+				     u8 *mac_addr)
+{
+	u32 rc = 0;
+	struct hwrm_cfa_l2_filter_alloc_input req = {0};
+	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
+	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
+				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+	req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
+	req.enables =
+		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
+			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
+	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+	req.l2_addr_mask[0] = 0xff;
+	req.l2_addr_mask[1] = 0xff;
+	req.l2_addr_mask[2] = 0xff;
+	req.l2_addr_mask[3] = 0xff;
+	req.l2_addr_mask[4] = 0xff;
+	req.l2_addr_mask[5] = 0xff;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
+							resp->l2_filter_id;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
+{
+	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
+	int rc = 0;
+
+	/* Any associated ntuple filters will also be cleared by firmware. */
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < num_of_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		for (j = 0; j < vnic->uc_filter_count; j++) {
+			struct hwrm_cfa_l2_filter_free_input req = {0};
+
+			bnxt_hwrm_cmd_hdr_init(bp, &req,
+					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
+
+			req.l2_filter_id = vnic->fw_l2_filter_id[j];
+
+			rc = _hwrm_send_message(bp, &req, sizeof(req),
+						HWRM_CMD_TIMEOUT);
+		}
+		vnic->uc_filter_count = 0;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
+}
+
+static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_tpa_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
+
+	if (tpa_flags) {
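+		/* Approximate the MSS as the MTU less 40 bytes of IPv4 and
+		 * TCP headers.
+		 */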
+		u16 mss = bp->dev->mtu - 40;
+		u32 nsegs, n, segs = 0, flags;
+
+		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
+			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
+			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
+			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
+			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
+		if (tpa_flags & BNXT_FLAG_GRO)
+			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
+
+		req.flags = cpu_to_le32(flags);
+
+		req.enables =
+			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+
+		/* The number of aggregation segments is in log2 units, and
+		 * the first packet is not counted as part of these units.
+		 */
+		if (mss <= PAGE_SIZE) {
+			n = PAGE_SIZE / mss;
+			nsegs = (MAX_SKB_FRAGS - 1) * n;
+		} else {
+			n = mss / PAGE_SIZE;
+			if (mss & (PAGE_SIZE - 1))
+				n++;
+			nsegs = (MAX_SKB_FRAGS - n) / n;
+		}
+
+		segs = ilog2(nsegs);
+		req.max_agg_segs = cpu_to_le16(segs);
+		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+	}
+	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+	u32 i, j, max_rings;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_rss_cfg_input req = {0};
+
+	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+	if (set_rss) {
+		vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
+				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
+				 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
+				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+
+		req.hash_type = cpu_to_le32(vnic->hash_type);
+
+		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+			max_rings = bp->rx_nr_rings;
+		else
+			max_rings = 1;
+
+		/* Fill the RSS indirection table with ring group ids */
+		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
+			if (j == max_rings)
+				j = 0;
+			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+		}
+
+		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+		req.hash_key_tbl_addr =
+			cpu_to_le64(vnic->rss_hash_key_dma_addr);
+	}
+	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_plcmodes_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
+	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+	req.enables =
+		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+	/* thresholds not implemented in firmware yet */
+	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+{
+	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
+	req.rss_cos_lb_ctx_id =
+		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+
+	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
+			bnxt_hwrm_vnic_ctx_free_one(bp, i);
+	}
+	bp->rsscos_nr_ctxs = 0;
+}
+
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+{
+	int rc;
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
+	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
+						bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
+			       -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+			le16_to_cpu(resp->rss_cos_lb_ctx_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	return rc;
+}
+
+static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+{
+	int grp_idx = 0;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+	/* Only RSS is supported for now; TBD: COS & LB */
+	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
+				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
+	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+	req.cos_rule = cpu_to_le16(0xffff);
+	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
+		grp_idx = 0;
+	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
+		grp_idx = vnic_id - 1;
+
+	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
+	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
+
+	req.lb_rule = cpu_to_le16(0xffff);
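+	/* The MRU covers the MTU plus the Ethernet header, FCS, and one
+	 * VLAN tag.
+	 */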
+	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
+			      VLAN_HLEN);
+
+	if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+{
+	u32 rc = 0;
+
+	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+		struct hwrm_vnic_free_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
+		req.vnic_id =
+			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+
+		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+		if (rc)
+			return rc;
+		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+	}
+	return rc;
+}
+
+static void bnxt_hwrm_vnic_free(struct bnxt *bp)
+{
+	u16 i;
+
+	for (i = 0; i < bp->nr_vnics; i++)
+		bnxt_hwrm_vnic_free_one(bp, i);
+}
+
+static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
+				u16 end_grp_id)
+{
+	u32 rc = 0, i, j;
+	struct hwrm_vnic_alloc_input req = {0};
+	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	/* map ring groups to this vnic */
+	for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
+		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
+			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
+				   j, (end_grp_id - start_grp_id));
+			break;
+		}
+		bp->vnic_info[vnic_id].fw_grp_ids[j] =
+					bp->grp_info[i].fw_grp_id;
+	}
+
+	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+	if (vnic_id == 0)
+		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
+{
+	u16 i;
+	u32 rc = 0;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		struct hwrm_ring_grp_alloc_input req = {0};
+		struct hwrm_ring_grp_alloc_output *resp =
+					bp->hwrm_cmd_resp_addr;
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
+
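+		/* A ring group ties together the completion ring, RX ring,
+		 * aggregation ring, and statistics context of one RX queue.
+		 */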
+		req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+		req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
+		req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
+		req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+
+		bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
+{
+	u16 i;
+	u32 rc = 0;
+	struct hwrm_ring_grp_free_input req = {0};
+
+	if (!bp->grp_info)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
+			continue;
+		req.ring_group_id =
+			cpu_to_le32(bp->grp_info[i].fw_grp_id);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+				    struct bnxt_ring_struct *ring,
+				    u32 ring_type, u32 map_index,
+				    u32 stats_ctx_id)
+{
+	int rc = 0, err = 0;
+	struct hwrm_ring_alloc_input req = {0};
+	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	u16 ring_id;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+
+	req.enables = 0;
+	if (ring->nr_pages > 1) {
+		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
+		/* Page size is in log2 units */
+		req.page_size = BNXT_PAGE_SHIFT;
+		req.page_tbl_depth = 1;
+	} else {
+		req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
+	}
+	req.fbo = 0;
+	/* Association of ring index with doorbell index and MSIX number */
+	req.logical_id = cpu_to_le16(map_index);
+
+	switch (ring_type) {
+	case HWRM_RING_ALLOC_TX:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+		/* Association of transmit ring with completion ring */
+		req.cmpl_ring_id =
+			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+		req.queue_id = cpu_to_le16(ring->queue_id);
+		break;
+	case HWRM_RING_ALLOC_RX:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
+		break;
+	case HWRM_RING_ALLOC_AGG:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
+		break;
+	case HWRM_RING_ALLOC_CMPL:
+		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
+		if (bp->flags & BNXT_FLAG_USING_MSIX)
+			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+		break;
+	default:
+		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
+			   ring_type);
+		return -1;
+	}
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	err = le16_to_cpu(resp->error_code);
+	ring_id = le16_to_cpu(resp->ring_id);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc || err) {
+		switch (ring_type) {
+		case HWRM_RING_ALLOC_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_RX:
+		case HWRM_RING_ALLOC_AGG:
+			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		case HWRM_RING_ALLOC_TX:
+			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+				   rc, err);
+			return -1;
+
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+	}
+	ring->fw_ring_id = ring_id;
+	return rc;
+}
+
+static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+{
+	int i, rc = 0;
+
+	if (bp->cp_nr_rings) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_CMPL, i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
+			cpr->cp_doorbell = bp->bar1 + i * 0x80;
+			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+			bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+
+	if (bp->tx_nr_rings) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+			u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_TX, i,
+						      fw_stats_ctx);
+			if (rc)
+				goto err_out;
+			txr->tx_doorbell = bp->bar1 + i * 0x80;
+		}
+	}
+
+	if (bp->rx_nr_rings) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_RX, i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
+			rxr->rx_doorbell = bp->bar1 + i * 0x80;
+			writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+			bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring =
+						&rxr->rx_agg_ring_struct;
+
+			rc = hwrm_ring_alloc_send_msg(bp, ring,
+						      HWRM_RING_ALLOC_AGG,
+						      bp->rx_nr_rings + i,
+						      INVALID_STATS_CTX_ID);
+			if (rc)
+				goto err_out;
+
+			rxr->rx_agg_doorbell =
+				bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
+			writel(DB_KEY_RX | rxr->rx_agg_prod,
+			       rxr->rx_agg_doorbell);
+			bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
+		}
+	}
+err_out:
+	return rc;
+}
+
+static int hwrm_ring_free_send_msg(struct bnxt *bp,
+				   struct bnxt_ring_struct *ring,
+				   u32 ring_type, int cmpl_ring_id)
+{
+	int rc;
+	struct hwrm_ring_free_input req = {0};
+	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
+	u16 error_code;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+	req.ring_type = ring_type;
+	req.ring_id = cpu_to_le16(ring->fw_ring_id);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	error_code = le16_to_cpu(resp->error_code);
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	if (rc || error_code) {
+		switch (ring_type) {
+		case RING_FREE_REQ_RING_TYPE_CMPL:
+			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+				   rc);
+			return rc;
+		case RING_FREE_REQ_RING_TYPE_RX:
+			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+				   rc);
+			return rc;
+		case RING_FREE_REQ_RING_TYPE_TX:
+			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+				   rc);
+			return rc;
+		default:
+			netdev_err(bp->dev, "Invalid ring\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
+{
+	int i, rc = 0;
+
+	if (!bp->bnapi)
+		return 0;
+
+	if (bp->tx_nr_rings) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
+			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_TX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->rx_nr_rings) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_RX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].rx_fw_ring_id =
+					INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->rx_agg_nr_pages) {
+		for (i = 0; i < bp->rx_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
+			struct bnxt_ring_struct *ring =
+						&rxr->rx_agg_ring_struct;
+			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_RX,
+					close_path ? cmpl_ring_id :
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].agg_fw_ring_id =
+					INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	if (bp->cp_nr_rings) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			struct bnxt_napi *bnapi = bp->bnapi[i];
+			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+				hwrm_ring_free_send_msg(
+					bp, ring,
+					RING_FREE_REQ_RING_TYPE_CMPL,
+					INVALID_HW_RING_ID);
+				ring->fw_ring_id = INVALID_HW_RING_ID;
+				bp->grp_info[i].cp_fw_ring_id =
+							INVALID_HW_RING_ID;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int bnxt_hwrm_set_coal(struct bnxt *bp)
+{
+	int i, rc = 0;
+	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+	u16 max_buf, max_buf_irq;
+	u16 buf_tmr, buf_tmr_irq;
+	u32 flags;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
+			       -1, -1);
+
+	/* Each rx completion (2 records) should be DMAed immediately */
+	max_buf = min_t(u16, bp->coal_bufs / 4, 2);
+	/* max_buf must not be zero */
+	max_buf = clamp_t(u16, max_buf, 1, 63);
+	max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
+	buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
+	buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
+
+	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+
+	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
+	 * if coal_ticks is less than 25 us.
+	 */
+	if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
+		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
+
+	req.flags = cpu_to_le16(flags);
+	req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
+	req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
+	req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
+	req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
+	req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
+	req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
+	req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_free_input req = {0};
+
+	if (!bp->bnapi)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
+			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
+
+			rc = _hwrm_send_message(bp, &req, sizeof(req),
+						HWRM_CMD_TIMEOUT);
+			if (rc)
+				break;
+
+			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
+		}
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
+{
+	int rc = 0, i;
+	struct hwrm_stat_ctx_alloc_input req = {0};
+	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+
+	req.update_period_ms = cpu_to_le32(1000);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+
+		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
+
+		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_func_qcaps_input req = {0};
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_func_qcaps_exit;
+
+	if (BNXT_PF(bp)) {
+		struct bnxt_pf_info *pf = &bp->pf;
+
+		pf->fw_fid = le16_to_cpu(resp->fid);
+		pf->port_id = le16_to_cpu(resp->port_id);
+		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+		pf->max_pf_tx_rings = pf->max_tx_rings;
+		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+		pf->max_pf_rx_rings = pf->max_rx_rings;
+		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+		pf->max_vnics = le16_to_cpu(resp->max_vnics);
+		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+		pf->max_vfs = le16_to_cpu(resp->max_vfs);
+		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
+		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		struct bnxt_vf_info *vf = &bp->vf;
+
+		vf->fw_fid = le16_to_cpu(resp->fid);
+		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+		if (!is_valid_ether_addr(vf->mac_addr))
+			random_ether_addr(vf->mac_addr);
+
+		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+		vf->max_vnics = le16_to_cpu(resp->max_vnics);
+		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+#endif
+	}
+
+	bp->tx_push_thresh = 0;
+	if (resp->flags &
+	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
+hwrm_func_qcaps_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_func_reset(struct bnxt *bp)
+{
+	struct hwrm_func_reset_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
+	req.enables = 0;
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
+}
+
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_queue_qportcfg_input req = {0};
+	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u8 i, *qptr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto qportcfg_exit;
+
+	if (!resp->max_configurable_queues) {
+		rc = -EINVAL;
+		goto qportcfg_exit;
+	}
+	bp->max_tc = resp->max_configurable_queues;
+	if (bp->max_tc > BNXT_MAX_QUEUE)
+		bp->max_tc = BNXT_MAX_QUEUE;
+
+	qptr = &resp->queue_id0;
+	for (i = 0; i < bp->max_tc; i++) {
+		bp->q_info[i].queue_id = *qptr++;
+		bp->q_info[i].queue_profile = *qptr++;
+	}
+
+qportcfg_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+	int rc;
+	struct hwrm_ver_get_input req = {0};
+	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
+	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+	req.hwrm_intf_min = HWRM_VERSION_MINOR;
+	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc)
+		goto hwrm_ver_get_exit;
+
+	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+	if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
+	    req.hwrm_intf_min != resp->hwrm_intf_min ||
+	    req.hwrm_intf_upd != resp->hwrm_intf_upd) {
+		netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
+			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
+			    resp->hwrm_intf_upd, req.hwrm_intf_maj,
+			    req.hwrm_intf_min, req.hwrm_intf_upd);
+		netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
+	}
+	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
+		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
+
+hwrm_ver_get_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+{
+	if (bp->vxlan_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	bp->vxlan_port_cnt = 0;
+	if (bp->nge_port_cnt) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+	}
+	bp->nge_port_cnt = 0;
+}
+
+static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
+{
+	int rc, i;
+	u32 tpa_flags = 0;
+
+	if (set_tpa)
+		tpa_flags = bp->flags & BNXT_FLAG_TPA;
+	for (i = 0; i < bp->nr_vnics; i++) {
+		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
+				   i, rc);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->nr_vnics; i++)
+		bnxt_hwrm_vnic_set_rss(bp, i, false);
+}
+
+static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+				    bool irq_re_init)
+{
+	if (bp->vnic_info) {
+		bnxt_hwrm_clear_vnic_filter(bp);
+		/* clear all RSS settings before freeing the vnic contexts */
+		bnxt_hwrm_clear_vnic_rss(bp);
+		bnxt_hwrm_vnic_ctx_free(bp);
+		/* before freeing the vnics, undo the TPA settings */
+		if (bp->flags & BNXT_FLAG_TPA)
+			bnxt_set_tpa(bp, false);
+		bnxt_hwrm_vnic_free(bp);
+	}
+	bnxt_hwrm_ring_free(bp, close_path);
+	bnxt_hwrm_ring_grp_free(bp);
+	if (irq_re_init) {
+		bnxt_hwrm_stat_ctx_free(bp);
+		bnxt_hwrm_free_tunnel_ports(bp);
+	}
+}
+
+static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+{
+	int rc;
+
+	/* allocate context for vnic */
+	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+	bp->rsscos_nr_ctxs++;
+
+	/* configure default vnic, ring grp */
+	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	/* Enable RSS hashing on vnic */
+	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
+			   vnic_id, rc);
+		goto vnic_setup_err;
+	}
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
+				   vnic_id, rc);
+		}
+	}
+
+vnic_setup_err:
+	return rc;
+}
+
+static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc = 0;
+
+	for (i = 0; i < bp->rx_nr_rings; i++) {
+		u16 vnic_id = i + 1;
+		u16 ring_id = i;
+
+		if (vnic_id >= bp->nr_vnics)
+			break;
+
+		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+				   vnic_id, rc);
+			break;
+		}
+		rc = bnxt_setup_vnic(bp, vnic_id);
+		if (rc)
+			break;
+	}
+	return rc;
+#else
+	return 0;
+#endif
+}
+
+static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
+{
+	int rc = 0;
+
+	if (irq_re_init) {
+		rc = bnxt_hwrm_stat_ctx_alloc(bp);
+		if (rc) {
+			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
+				   rc);
+			goto err_out;
+		}
+	}
+
+	rc = bnxt_hwrm_ring_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_hwrm_ring_grp_alloc(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
+		goto err_out;
+	}
+
+	/* default vnic 0 */
+	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_setup_vnic(bp, 0);
+	if (rc)
+		goto err_out;
+
+	if (bp->flags & BNXT_FLAG_RFS) {
+		rc = bnxt_alloc_rfs_vnics(bp);
+		if (rc)
+			goto err_out;
+	}
+
+	if (bp->flags & BNXT_FLAG_TPA) {
+		rc = bnxt_set_tpa(bp, true);
+		if (rc)
+			goto err_out;
+	}
+
+	if (BNXT_VF(bp))
+		bnxt_update_vf_mac(bp);
+
+	/* Filter for default vnic 0 */
+	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
+	if (rc) {
+		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+		goto err_out;
+	}
+	bp->vnic_info[0].uc_filter_count = 1;
+
+	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+				   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+
+	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+		bp->vnic_info[0].rx_mask |=
+				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc) {
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
+		goto err_out;
+	}
+
+	rc = bnxt_hwrm_set_coal(bp);
+	if (rc)
+		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
+			    rc);
+
+	return 0;
+
+err_out:
+	bnxt_hwrm_resource_free(bp, 0, true);
+
+	return rc;
+}
+
+static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
+	return 0;
+}
+
+static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
+{
+	bnxt_init_rx_rings(bp);
+	bnxt_init_tx_rings(bp);
+	bnxt_init_ring_grps(bp, irq_re_init);
+	bnxt_init_vnics(bp);
+
+	return bnxt_init_chip(bp, irq_re_init);
+}
+
+static void bnxt_disable_int(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+	int i;
+
+	atomic_set(&bp->intr_sem, 0);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+	}
+}
+
+static int bnxt_set_real_num_queues(struct bnxt *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+
+	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+	if (rc)
+		return rc;
+
+	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
+	if (rc)
+		return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (bp->rx_nr_rings)
+		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
+	if (!dev->rx_cpu_rmap)
+		rc = -ENOMEM;
+#endif
+
+	return rc;
+}
+
+static int bnxt_setup_msix(struct bnxt *bp)
+{
+	struct msix_entry *msix_ent;
+	struct net_device *dev = bp->dev;
+	int i, total_vecs, rc = 0;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	bp->flags &= ~BNXT_FLAG_USING_MSIX;
+	total_vecs = bp->cp_nr_rings;
+
+	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!msix_ent)
+		return -ENOMEM;
+
+	for (i = 0; i < total_vecs; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
+	if (total_vecs < 0) {
+		rc = -ENODEV;
+		goto msix_setup_exit;
+	}
+
+	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+	if (bp->irq_tbl) {
+		int tcs;
+
+		/* Trim rings based upon num of vectors allocated */
+		bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
+		bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		tcs = netdev_get_num_tc(dev);
+		if (tcs > 1) {
+			bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
+			if (bp->tx_nr_rings_per_tc == 0) {
+				netdev_reset_tc(dev);
+				bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+			} else {
+				int i, off, count;
+
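+				/* Partition the tx rings evenly across
+				 * the traffic classes.
+				 */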
+				bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+				for (i = 0; i < tcs; i++) {
+					count = bp->tx_nr_rings_per_tc;
+					off = i * count;
+					netdev_set_tc_queue(dev, i, count, off);
+				}
+			}
+		}
+		bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			bp->irq_tbl[i].vector = msix_ent[i].vector;
+			snprintf(bp->irq_tbl[i].name, len,
+				 "%s-%s-%d", dev->name, "TxRx", i);
+			bp->irq_tbl[i].handler = bnxt_msix;
+		}
+		rc = bnxt_set_real_num_queues(bp);
+		if (rc)
+			goto msix_setup_exit;
+	} else {
+		rc = -ENOMEM;
+		goto msix_setup_exit;
+	}
+	bp->flags |= BNXT_FLAG_USING_MSIX;
+	kfree(msix_ent);
+	return 0;
+
+msix_setup_exit:
+	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+	pci_disable_msix(bp->pdev);
+	kfree(msix_ent);
+	return rc;
+}
+
+static int bnxt_setup_inta(struct bnxt *bp)
+{
+	int rc;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	if (netdev_get_num_tc(bp->dev))
+		netdev_reset_tc(bp->dev);
+
+	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+	if (!bp->irq_tbl) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	bp->rx_nr_rings = 1;
+	bp->tx_nr_rings = 1;
+	bp->cp_nr_rings = 1;
+	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+	bp->irq_tbl[0].vector = bp->pdev->irq;
+	snprintf(bp->irq_tbl[0].name, len,
+		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
+	bp->irq_tbl[0].handler = bnxt_inta;
+	rc = bnxt_set_real_num_queues(bp);
+	return rc;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+	int rc = 0;
+
+	if (bp->flags & BNXT_FLAG_MSIX_CAP)
+		rc = bnxt_setup_msix(bp);
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		/* fallback to INTA */
+		rc = bnxt_setup_inta(bp);
+	}
+	return rc;
+}
+
+static void bnxt_free_irq(struct bnxt *bp)
+{
+	struct bnxt_irq *irq;
+	int i;
+
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+	bp->dev->rx_cpu_rmap = NULL;
+#endif
+	if (!bp->irq_tbl)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		irq = &bp->irq_tbl[i];
+		if (irq->requested)
+			free_irq(irq->vector, bp->bnapi[i]);
+		irq->requested = 0;
+	}
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
+}
+
+static int bnxt_request_irq(struct bnxt *bp)
+{
+	int i, rc = 0;
+	unsigned long flags = 0;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+#endif
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+		flags = IRQF_SHARED;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_irq *irq = &bp->irq_tbl[i];
+#ifdef CONFIG_RFS_ACCEL
+		if (rmap && (i < bp->rx_nr_rings)) {
+			rc = irq_cpu_rmap_add(rmap, irq->vector);
+			if (rc)
+				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
+					    i);
+		}
+#endif
+		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+				 bp->bnapi[i]);
+		if (rc)
+			break;
+
+		irq->requested = 1;
+	}
+	return rc;
+}
+
+static void bnxt_del_napi(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+
+		napi_hash_del(&bnapi->napi);
+		netif_napi_del(&bnapi->napi);
+	}
+}
+
+static void bnxt_init_napi(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+
+	if (bp->flags & BNXT_FLAG_USING_MSIX) {
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			bnapi = bp->bnapi[i];
+			netif_napi_add(bp->dev, &bnapi->napi,
+				       bnxt_poll, 64);
+			napi_hash_add(&bnapi->napi);
+		}
+	} else {
+		bnapi = bp->bnapi[0];
+		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+		napi_hash_add(&bnapi->napi);
+	}
+}
+
+static void bnxt_disable_napi(struct bnxt *bp)
+{
+	int i;
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		napi_disable(&bp->bnapi[i]->napi);
+		bnxt_disable_poll(bp->bnapi[i]);
+	}
+}
+
+static void bnxt_enable_napi(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bnxt_enable_poll(bp->bnapi[i]);
+		napi_enable(&bp->bnapi[i]->napi);
+	}
+}
+
+static void bnxt_tx_disable(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	if (bp->bnapi) {
+		for (i = 0; i < bp->tx_nr_rings; i++) {
+			bnapi = bp->bnapi[i];
+			txr = &bnapi->tx_ring;
+			txq = netdev_get_tx_queue(bp->dev, i);
+			__netif_tx_lock(txq, smp_processor_id());
+			txr->dev_state = BNXT_DEV_STATE_CLOSING;
+			__netif_tx_unlock(txq);
+		}
+	}
+	/* Stop all TX queues */
+	netif_tx_disable(bp->dev);
+	netif_carrier_off(bp->dev);
+}
+
+static void bnxt_tx_enable(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		bnapi = bp->bnapi[i];
+		txr = &bnapi->tx_ring;
+		txq = netdev_get_tx_queue(bp->dev, i);
+		txr->dev_state = 0;
+	}
+	netif_tx_wake_all_queues(bp->dev);
+	if (bp->link_info.link_up)
+		netif_carrier_on(bp->dev);
+}
+
+static void bnxt_report_link(struct bnxt *bp)
+{
+	if (bp->link_info.link_up) {
+		const char *duplex;
+		const char *flow_ctrl;
+		u16 speed;
+
+		netif_carrier_on(bp->dev);
+		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
+			duplex = "full";
+		else
+			duplex = "half";
+		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
+			flow_ctrl = "ON - receive & transmit";
+		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
+			flow_ctrl = "ON - transmit";
+		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
+			flow_ctrl = "ON - receive";
+		else
+			flow_ctrl = "none";
+		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+			    speed, duplex, flow_ctrl);
+	} else {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+	}
+}
+
+static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+{
+	int rc = 0;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	struct hwrm_port_phy_qcfg_input req = {0};
+	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	u8 link_up = link_info->link_up;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc) {
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		return rc;
+	}
+
+	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
+	link_info->phy_link_status = resp->link;
+	link_info->duplex = resp->duplex;
+	link_info->pause = resp->pause;
+	link_info->auto_mode = resp->auto_mode;
+	link_info->auto_pause_setting = resp->auto_pause;
+	link_info->force_pause_setting = resp->force_pause;
+	link_info->duplex_setting = resp->duplex_setting;
+	if (link_info->phy_link_status == BNXT_LINK_LINK)
+		link_info->link_speed = le16_to_cpu(resp->link_speed);
+	else
+		link_info->link_speed = 0;
+	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+	link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
+	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
+	link_info->phy_ver[0] = resp->phy_maj;
+	link_info->phy_ver[1] = resp->phy_min;
+	link_info->phy_ver[2] = resp->phy_bld;
+	link_info->media_type = resp->media_type;
+	link_info->transceiver = resp->transceiver_type;
+	link_info->phy_addr = resp->phy_addr;
+
+	/* TODO: need to add more logic to report VF link */
+	if (chng_link_state) {
+		if (link_info->phy_link_status == BNXT_LINK_LINK)
+			link_info->link_up = 1;
+		else
+			link_info->link_up = 0;
+		if (link_up != link_info->link_up)
+			bnxt_report_link(bp);
+	} else {
+		/* always link down if not required to update link state */
+		link_info->link_up = 0;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return 0;
+}
+
+static void
+bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
+{
+	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+	} else {
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
+		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
+			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
+		req->enables |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+	}
+}
+
+static void bnxt_hwrm_set_link_common(struct bnxt *bp,
+				      struct hwrm_port_phy_cfg_input *req)
+{
+	u8 autoneg = bp->link_info.autoneg;
+	u16 fw_link_speed = bp->link_info.req_link_speed;
+	u32 advertising = bp->link_info.advertising;
+
+	if (autoneg & BNXT_AUTONEG_SPEED) {
+		req->auto_mode |=
+			PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+
+		req->enables |= cpu_to_le32(
+			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+		req->auto_link_speed_mask = cpu_to_le16(advertising);
+
+		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
+		req->flags |=
+			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+	} else {
+		req->force_link_speed = cpu_to_le16(fw_link_speed);
+		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+	}
+
+	/* currently don't support half duplex */
+	req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
+	req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
+	/* tell chimp that the setting takes effect immediately */
+	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+}
+
+int bnxt_hwrm_set_pause(struct bnxt *bp)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	bnxt_hwrm_set_pause_common(bp, &req);
+
+	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
+	    bp->link_info.force_link_chng)
+		bnxt_hwrm_set_link_common(bp, &req);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
+		/* Since changing the pause setting doesn't trigger any link
+		 * change event, the driver needs to update the current pause
+		 * result upon successful return of the phy_cfg command.
+		 */
+		bp->link_info.pause =
+		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
+		bp->link_info.auto_pause_setting = 0;
+		if (!bp->link_info.force_link_chng)
+			bnxt_report_link(bp);
+	}
+	bp->link_info.force_link_chng = false;
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+{
+	struct hwrm_port_phy_cfg_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+	if (set_pause)
+		bnxt_hwrm_set_pause_common(bp, &req);
+
+	bnxt_hwrm_set_link_common(bp, &req);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_update_phy_setting(struct bnxt *bp)
+{
+	int rc;
+	bool update_link = false;
+	bool update_pause = false;
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	rc = bnxt_update_link(bp, true);
+	if (rc) {
+		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+	    link_info->auto_pause_setting != link_info->req_flow_ctrl)
+		update_pause = true;
+	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
+	    link_info->force_pause_setting != link_info->req_flow_ctrl)
+		update_pause = true;
+	if (link_info->req_duplex != link_info->duplex_setting)
+		update_link = true;
+	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+		if (BNXT_AUTO_MODE(link_info->auto_mode))
+			update_link = true;
+		if (link_info->req_link_speed != link_info->force_link_speed)
+			update_link = true;
+	} else {
+		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
+			update_link = true;
+		if (link_info->advertising != link_info->auto_link_speeds)
+			update_link = true;
+		if (link_info->req_link_speed != link_info->auto_link_speed)
+			update_link = true;
+	}
+
+	if (update_link)
+		rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+	else if (update_pause)
+		rc = bnxt_hwrm_set_pause(bp);
+	if (rc) {
+		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+	netif_carrier_off(bp->dev);
+	if (irq_re_init) {
+		rc = bnxt_setup_int_mode(bp);
+		if (rc) {
+			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+				   rc);
+			return rc;
+		}
+	}
+	if ((bp->flags & BNXT_FLAG_RFS) &&
+	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		/* disable RFS if falling back to INTA */
+		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
+		bp->flags &= ~BNXT_FLAG_RFS;
+	}
+
+	rc = bnxt_alloc_mem(bp, irq_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+		goto open_err_free_mem;
+	}
+
+	if (irq_re_init) {
+		bnxt_init_napi(bp);
+		rc = bnxt_request_irq(bp);
+		if (rc) {
+			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+			goto open_err;
+		}
+	}
+
+	bnxt_enable_napi(bp);
+
+	rc = bnxt_init_nic(bp, irq_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+		goto open_err;
+	}
+
+	if (link_re_init) {
+		rc = bnxt_update_phy_setting(bp);
+		if (rc)
+			goto open_err;
+	}
+
+	if (irq_re_init) {
+#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
+		vxlan_get_rx_port(bp->dev);
+#endif
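+		/* 0x17c1 (6081) is the IANA-assigned GENEVE UDP port. */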
+		if (!bnxt_hwrm_tunnel_dst_port_alloc(
+				bp, htons(0x17c1),
+				TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
+			bp->nge_port_cnt = 1;
+	}
+
+	bp->state = BNXT_STATE_OPEN;
+	bnxt_enable_int(bp);
+	/* Enable TX queues */
+	bnxt_tx_enable(bp);
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	return 0;
+
+open_err:
+	bnxt_disable_napi(bp);
+	bnxt_del_napi(bp);
+
+open_err_free_mem:
+	bnxt_free_skbs(bp);
+	bnxt_free_irq(bp);
+	bnxt_free_mem(bp, true);
+	return rc;
+}
+
+/* rtnl_lock held */
+int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+	if (rc) {
+		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
+		dev_close(bp->dev);
+	}
+	return rc;
+}
+
+static int bnxt_open(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+			   rc);
+		rc = -1;
+		return rc;
+	}
+	return __bnxt_open_nic(bp, true, true);
+}
+
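+/* Raise intr_sem to flag interrupts as disabled, then wait for any
+ * handlers still running on other CPUs to complete.
+ */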
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+	if (!netif_running(bp->dev))
+		return;
+
+	bnxt_disable_int(bp);
+	for (i = 0; i < bp->cp_nr_rings; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+{
+	int rc = 0;
+
+#ifdef CONFIG_BNXT_SRIOV
+	if (bp->sriov_cfg) {
+		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+						      !bp->sriov_cfg,
+						      BNXT_SRIOV_CFG_WAIT_TMO);
+		if (rc)
+			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+	}
+#endif
+	/* Change device state to avoid TX queue wake-ups */
+	bnxt_tx_disable(bp);
+
+	bp->state = BNXT_STATE_CLOSED;
+	cancel_work_sync(&bp->sp_task);
+
+	/* Flush rings before disabling interrupts */
+	bnxt_shutdown_nic(bp, irq_re_init);
+
+	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
+
+	bnxt_disable_napi(bp);
+	bnxt_disable_int_sync(bp);
+	del_timer_sync(&bp->timer);
+	bnxt_free_skbs(bp);
+
+	if (irq_re_init) {
+		bnxt_free_irq(bp);
+		bnxt_del_napi(bp);
+	}
+	bnxt_free_mem(bp, irq_re_init);
+	return rc;
+}
+
+static int bnxt_close(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	bnxt_close_nic(bp, true, true);
+	return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		/* fallthru */
+	case SIOCGMIIREG: {
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		return 0;
+	}
+
+	case SIOCSMIIREG:
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		return 0;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_stats64 *
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	u32 i;
+	struct bnxt *bp = netdev_priv(dev);
+
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+
+	if (!bp->bnapi)
+		return stats;
+
+	/* TODO check if we need to synchronize with bnxt_close path */
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+		struct ctx_hw_stats *hw_stats = cpr->hw_stats;
+
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
+		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
+
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
+		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
+
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
+		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
+
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
+		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
+
+		stats->rx_missed_errors +=
+			le64_to_cpu(hw_stats->rx_discard_pkts);
+
+		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
+
+		stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
+
+		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
+	}
+
+	return stats;
+}
+
+static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	u8 *haddr;
+	int mc_count = 0;
+	bool update = false;
+	int off = 0;
+
+	netdev_for_each_mc_addr(ha, dev) {
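+		/* Too many addresses to filter individually; fall back
+		 * to receiving all multicast.
+		 */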
+		if (mc_count >= BNXT_MAX_MC_ADDRS) {
+			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+			vnic->mc_list_count = 0;
+			return false;
+		}
+		haddr = ha->addr;
+		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
+			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
+			update = true;
+		}
+		off += ETH_ALEN;
+		mc_count++;
+	}
+	if (mc_count)
+		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
+
+	if (mc_count != vnic->mc_list_count) {
+		vnic->mc_list_count = mc_count;
+		update = true;
+	}
+	return update;
+}
+
+static bool bnxt_uc_list_updated(struct bnxt *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	int off = 0;
+
+	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
+		return true;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
+			return true;
+
+		off += ETH_ALEN;
+	}
+	return false;
+}
+
+static void bnxt_set_rx_mode(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	u32 mask = vnic->rx_mask;
+	bool mc_update = false;
+	bool uc_update;
+
+	if (!netif_running(dev))
+		return;
+
+	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
+		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
+		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+
+	/* Only allow PF to be in promiscuous mode */
+	if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+	uc_update = bnxt_uc_list_updated(bp);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+		vnic->mc_list_count = 0;
+	} else {
+		mc_update = bnxt_mc_list_updated(bp, &mask);
+	}
+
+	if (mask != vnic->rx_mask || uc_update || mc_update) {
+		vnic->rx_mask = mask;
+
+		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+}
+
+static void bnxt_cfg_rx_mode(struct bnxt *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	struct netdev_hw_addr *ha;
+	int i, off = 0, rc;
+	bool uc_update;
+
+	netif_addr_lock_bh(dev);
+	uc_update = bnxt_uc_list_updated(bp);
+	netif_addr_unlock_bh(dev);
+
+	if (!uc_update)
+		goto skip_uc;
+
+	mutex_lock(&bp->hwrm_cmd_lock);
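+	/* Filter 0 holds the default MAC address; free only the extra
+	 * unicast filters.
+	 */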
+	for (i = 1; i < vnic->uc_filter_count; i++) {
+		struct hwrm_cfa_l2_filter_free_input req = {0};
+
+		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
+				       -1);
+
+		req.l2_filter_id = vnic->fw_l2_filter_id[i];
+
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+
+	vnic->uc_filter_count = 1;
+
+	netif_addr_lock_bh(dev);
+	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
+		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+	} else {
+		netdev_for_each_uc_addr(ha, dev) {
+			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
+			off += ETH_ALEN;
+			vnic->uc_filter_count++;
+		}
+	}
+	netif_addr_unlock_bh(dev);
+
+	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
+		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
+		if (rc) {
+			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
+				   rc);
+			vnic->uc_filter_count = i;
+		}
+	}
+
+skip_uc:
+	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+	if (rc)
+		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+			   rc);
+}
+
+static netdev_features_t bnxt_fix_features(struct net_device *dev,
+					   netdev_features_t features)
+{
+	return features;
+}
+
+static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u32 flags = bp->flags;
+	u32 changes;
+	int rc = 0;
+	bool re_init = false;
+	bool update_tpa = false;
+
+	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
+	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+		flags |= BNXT_FLAG_GRO;
+	if (features & NETIF_F_LRO)
+		flags |= BNXT_FLAG_LRO;
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		flags |= BNXT_FLAG_STRIP_VLAN;
+
+	if (features & NETIF_F_NTUPLE)
+		flags |= BNXT_FLAG_RFS;
+
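+	/* Turning TPA fully on or off requires the rings to be
+	 * re-initialized; switching between TPA modes does not.
+	 */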
+	changes = flags ^ bp->flags;
+	if (changes & BNXT_FLAG_TPA) {
+		update_tpa = true;
+		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
+		    (flags & BNXT_FLAG_TPA) == 0)
+			re_init = true;
+	}
+
+	if (changes & ~BNXT_FLAG_TPA)
+		re_init = true;
+
+	if (flags != bp->flags) {
+		u32 old_flags = bp->flags;
+
+		bp->flags = flags;
+
+		if (!netif_running(dev)) {
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+			return rc;
+		}
+
+		if (re_init) {
+			bnxt_close_nic(bp, false, false);
+			if (update_tpa)
+				bnxt_set_ring_params(bp);
+
+			return bnxt_open_nic(bp, false, false);
+		}
+		if (update_tpa) {
+			rc = bnxt_set_tpa(bp,
+					  (flags & BNXT_FLAG_TPA) ?
+					  true : false);
+			if (rc)
+				bp->flags = old_flags;
+		}
+	}
+	return rc;
+}
+
+static void bnxt_dbg_dump_states(struct bnxt *bp)
+{
+	int i;
+	struct bnxt_napi *bnapi;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		bnapi = bp->bnapi[i];
+		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
+		cpr = &bnapi->cp_ring;
+		if (netif_msg_drv(bp)) {
+			netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
+				    i, txr->tx_ring_struct.fw_ring_id,
+				    txr->tx_prod, txr->tx_cons);
+			netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
+				    i, rxr->rx_ring_struct.fw_ring_id,
+				    rxr->rx_prod,
+				    rxr->rx_agg_ring_struct.fw_ring_id,
+				    rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
+			netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
+				    i, cpr->cp_ring_struct.fw_ring_id,
+				    cpr->cp_raw_cons);
+		}
+	}
+}
+
+static void bnxt_reset_task(struct bnxt *bp)
+{
+	bnxt_dbg_dump_states(bp);
+	if (netif_running(bp->dev))
+		bnxt_tx_disable(bp); /* prevent tx timeout again */
+}
+
+static void bnxt_tx_timeout(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
+	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+	schedule_work(&bp->sp_task);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bnxt_poll_controller(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, bp->bnapi[i]);
+		enable_irq(irq->vector);
+	}
+}
+#endif
+
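+/* Periodic timer; for now it only rearms itself. */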
+static void bnxt_timer(unsigned long data)
+{
+	struct bnxt *bp = (struct bnxt *)data;
+	struct net_device *dev = bp->dev;
+
+	if (!netif_running(dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnxt_restart_timer;
+
+bnxt_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *);
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
+	int rc;
+
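+	/* Slow path events are only serviced while the NIC is open. */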
+	if (bp->state != BNXT_STATE_OPEN)
+		return;
+
+	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_rx_mode(bp);
+
+	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
+		bnxt_cfg_ntp_filters(bp);
+	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+		rc = bnxt_update_link(bp, true);
+		if (rc)
+			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+				   rc);
+	}
+	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
+		bnxt_hwrm_exec_fwd_req(bp);
+	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_alloc(
+			bp, bp->vxlan_port,
+			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+		bnxt_hwrm_tunnel_dst_port_free(
+			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+	}
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+		bnxt_reset_task(bp);
+}
+
+static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	int rc;
+	struct bnxt *bp = netdev_priv(dev);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto init_err;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto init_err_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto init_err_disable;
+	}
+
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
+	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		goto init_err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	bp->bar0 = pci_ioremap_bar(pdev, 0);
+	if (!bp->bar0) {
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar1 = pci_ioremap_bar(pdev, 2);
+	if (!bp->bar1) {
+		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	bp->bar2 = pci_ioremap_bar(pdev, 4);
+	if (!bp->bar2) {
+		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
+		rc = -ENOMEM;
+		goto init_err_release;
+	}
+
+	INIT_WORK(&bp->sp_task, bnxt_sp_task);
+
+	spin_lock_init(&bp->ntp_fltr_lock);
+
+	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
+	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
+
+	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
+	bp->coal_bufs = 20;
+	bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
+	bp->coal_bufs_irq = 2;
+
+	init_timer(&bp->timer);
+	bp->timer.data = (unsigned long)bp;
+	bp->timer.function = bnxt_timer;
+	bp->current_interval = BNXT_TIMER_INTERVAL;
+
+	bp->state = BNXT_STATE_CLOSED;
+
+	return 0;
+
+init_err_release:
+	if (bp->bar2) {
+		pci_iounmap(pdev, bp->bar2);
+		bp->bar2 = NULL;
+	}
+
+	if (bp->bar1) {
+		pci_iounmap(pdev, bp->bar1);
+		bp->bar1 = NULL;
+	}
+
+	if (bp->bar0) {
+		pci_iounmap(pdev, bp->bar0);
+		bp->bar0 = NULL;
+	}
+
+	pci_release_regions(pdev);
+
+init_err_disable:
+	pci_disable_device(pdev);
+
+init_err:
+	return rc;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	return 0;
+}
+
+/* rtnl_lock held */
+static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (new_mtu < 60 || new_mtu > 9000)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		bnxt_close_nic(bp, false, false);
+
+	dev->mtu = new_mtu;
+	bnxt_set_ring_params(bp);
+
+	if (netif_running(dev))
+		return bnxt_open_nic(bp, false, false);
+
+	return 0;
+}
+
+static int bnxt_setup_tc(struct net_device *dev, u8 tc)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (tc > bp->max_tc) {
+		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+			   tc, bp->max_tc);
+		return -EINVAL;
+	}
+
+	if (netdev_get_num_tc(dev) == tc)
+		return 0;
+
+	if (tc) {
+		int max_rx_rings, max_tx_rings;
+
+		bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+		if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
+			return -ENOMEM;
+	}
+
+	/* Need to close the device and redo hw resource allocations */
+	if (netif_running(bp->dev))
+		bnxt_close_nic(bp, true, false);
+
+	if (tc) {
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
+		netdev_set_num_tc(dev, tc);
+	} else {
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+		netdev_reset_tc(dev);
+	}
+	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (netif_running(bp->dev))
+		return bnxt_open_nic(bp, true, false);
+
+	return 0;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
+			    struct bnxt_ntuple_filter *f2)
+{
+	struct flow_keys *keys1 = &f1->fkeys;
+	struct flow_keys *keys2 = &f2->fkeys;
+
+	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
+	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
+	    keys1->ports.ports == keys2->ports.ports &&
+	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
+	    keys1->basic.n_proto == keys2->basic.n_proto &&
+	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+		return true;
+
+	return false;
+}
+
+static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			      u16 rxq_index, u32 flow_id)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_ntuple_filter *fltr, *new_fltr;
+	struct flow_keys *fkeys;
+	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+	int rc = 0, idx;
+	struct hlist_head *head;
+
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
+	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
+	if (!new_fltr)
+		return -ENOMEM;
+
+	fkeys = &new_fltr->fkeys;
+	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
+
+	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
+	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
+		rc = -EPROTONOSUPPORT;
+		goto err_free;
+	}
+
+	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
+
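+	/* Check the hash bucket for an existing filter with the same
+	 * flow keys before committing a new one.
+	 */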
+	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
+	head = &bp->ntp_fltr_hash_tbl[idx];
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(fltr, head, hash) {
+		if (bnxt_fltr_match(fltr, new_fltr)) {
+			rcu_read_unlock();
+			rc = 0;
+			goto err_free;
+		}
+	}
+	rcu_read_unlock();
+
+	spin_lock_bh(&bp->ntp_fltr_lock);
+	new_fltr->sw_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
+						  BNXT_NTP_FLTR_MAX_FLTR, 0);
+	if (new_fltr->sw_id < 0) {
+		spin_unlock_bh(&bp->ntp_fltr_lock);
+		rc = -ENOMEM;
+		goto err_free;
+	}
+
+	new_fltr->flow_id = flow_id;
+	new_fltr->rxq = rxq_index;
+	hlist_add_head_rcu(&new_fltr->hash, head);
+	bp->ntp_fltr_count++;
+	spin_unlock_bh(&bp->ntp_fltr_lock);
+
+	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
+	schedule_work(&bp->sp_task);
+
+	return new_fltr->sw_id;
+
+err_free:
+	kfree(new_fltr);
+	return rc;
+}
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+	int i;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct hlist_node *tmp;
+		struct bnxt_ntuple_filter *fltr;
+		int rc;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
+			bool del = false;
+
+			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
+				if (rps_may_expire_flow(bp->dev, fltr->rxq,
+							fltr->flow_id,
+							fltr->sw_id)) {
+					bnxt_hwrm_cfa_ntuple_filter_free(bp,
+									 fltr);
+					del = true;
+				}
+			} else {
+				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
+								       fltr);
+				if (rc)
+					del = true;
+				else
+					set_bit(BNXT_FLTR_VALID, &fltr->state);
+			}
+
+			if (del) {
+				spin_lock_bh(&bp->ntp_fltr_lock);
+				hlist_del_rcu(&fltr->hash);
+				bp->ntp_fltr_count--;
+				spin_unlock_bh(&bp->ntp_fltr_lock);
+				synchronize_rcu();
+				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+				kfree(fltr);
+			}
+		}
+	}
+}
+
+#else
+
+static void bnxt_cfg_ntp_filters(struct bnxt *bp)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				__be16 port)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	if (sa_family != AF_INET6 && sa_family != AF_INET)
+		return;
+
+	if (bp->vxlan_port_cnt && bp->vxlan_port != port)
+		return;
+
+	bp->vxlan_port_cnt++;
+	if (bp->vxlan_port_cnt == 1) {
+		bp->vxlan_port = port;
+		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+		schedule_work(&bp->sp_task);
+	}
+}
+
+static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
+				__be16 port)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	if (sa_family != AF_INET6 && sa_family != AF_INET)
+		return;
+
+	if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+		bp->vxlan_port_cnt--;
+
+		if (bp->vxlan_port_cnt == 0) {
+			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+			schedule_work(&bp->sp_task);
+		}
+	}
+}
+
+static const struct net_device_ops bnxt_netdev_ops = {
+	.ndo_open		= bnxt_open,
+	.ndo_start_xmit		= bnxt_start_xmit,
+	.ndo_stop		= bnxt_close,
+	.ndo_get_stats64	= bnxt_get_stats64,
+	.ndo_set_rx_mode	= bnxt_set_rx_mode,
+	.ndo_do_ioctl		= bnxt_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= bnxt_change_mac_addr,
+	.ndo_change_mtu		= bnxt_change_mtu,
+	.ndo_fix_features	= bnxt_fix_features,
+	.ndo_set_features	= bnxt_set_features,
+	.ndo_tx_timeout		= bnxt_tx_timeout,
+#ifdef CONFIG_BNXT_SRIOV
+	.ndo_get_vf_config	= bnxt_get_vf_config,
+	.ndo_set_vf_mac		= bnxt_set_vf_mac,
+	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
+	.ndo_set_vf_rate	= bnxt_set_vf_bw,
+	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
+	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bnxt_poll_controller,
+#endif
+	.ndo_setup_tc           = bnxt_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
+#endif
+	.ndo_add_vxlan_port	= bnxt_add_vxlan_port,
+	.ndo_del_vxlan_port	= bnxt_del_vxlan_port,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= bnxt_busy_poll,
+#endif
+};
+
+static void bnxt_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (BNXT_PF(bp))
+		bnxt_sriov_disable(bp);
+
+	unregister_netdev(dev);
+	cancel_work_sync(&bp->sp_task);
+	bp->sp_event = 0;
+
+	bnxt_free_hwrm_resources(bp);
+	pci_iounmap(pdev, bp->bar2);
+	pci_iounmap(pdev, bp->bar1);
+	pci_iounmap(pdev, bp->bar0);
+	free_netdev(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static int bnxt_probe_phy(struct bnxt *bp)
+{
+	int rc = 0;
+	struct bnxt_link_info *link_info = &bp->link_info;
+	char phy_ver[PHY_VER_STR_LEN];
+
+	rc = bnxt_update_link(bp, false);
+	if (rc) {
+		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
+			   rc);
+		return rc;
+	}
+
+	/*initialize the ethool setting copy with NVM settings */
+	if (BNXT_AUTO_MODE(link_info->auto_mode))
+		link_info->autoneg |= BNXT_AUTONEG_SPEED;
+
+	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
+			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl = link_info->auto_pause_setting;
+	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		link_info->req_flow_ctrl = link_info->force_pause_setting;
+	}
+	link_info->req_duplex = link_info->duplex_setting;
+	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
+		link_info->req_link_speed = link_info->auto_link_speed;
+	else
+		link_info->req_link_speed = link_info->force_link_speed;
+	link_info->advertising = link_info->auto_link_speeds;
+	snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
+		 link_info->phy_ver[0],
+		 link_info->phy_ver[1],
+		 link_info->phy_ver[2]);
+	strcat(bp->fw_ver_str, phy_ver);
+	return rc;
+}
+
+static int bnxt_get_max_irq(struct pci_dev *pdev)
+{
+	u16 ctrl;
+
+	if (!pdev->msix_cap)
+		return 1;
+
+	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+}
+
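+/* Report the max usable rx/tx rings, capped by the completion ring,
+ * IRQ and stats context limits reported by firmware.  Rx rings are
+ * halved when aggregation rings are enabled since each rx ring then
+ * also consumes an agg ring.
+ */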
+void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
+{
+	int max_rings = 0;
+
+	if (BNXT_PF(bp)) {
+		*max_tx = bp->pf.max_pf_tx_rings;
+		*max_rx = bp->pf.max_pf_rx_rings;
+		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
+	} else {
+#ifdef CONFIG_BNXT_SRIOV
+		*max_tx = bp->vf.max_tx_rings;
+		*max_rx = bp->vf.max_rx_rings;
+		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
+#endif
+	}
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		*max_rx >>= 1;
+
+	*max_rx = min_t(int, *max_rx, max_rings);
+	*max_tx = min_t(int, *max_tx, max_rings);
+}
+
+static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int version_printed;
+	struct net_device *dev;
+	struct bnxt *bp;
+	int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
+
+	if (version_printed++ == 0)
+		pr_info("%s", version);
+
+	max_irqs = bnxt_get_max_irq(pdev);
+	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
+	if (!dev)
+		return -ENOMEM;
+
+	bp = netdev_priv(dev);
+
+	if (bnxt_vf_pciid(ent->driver_data))
+		bp->flags |= BNXT_FLAG_VF;
+
+	if (pdev->msix_cap) {
+		bp->flags |= BNXT_FLAG_MSIX_CAP;
+		if (BNXT_PF(bp))
+			bp->flags |= BNXT_FLAG_RFS;
+	}
+
+	rc = bnxt_init_board(pdev, dev);
+	if (rc < 0)
+		goto init_err_free;
+
+	dev->netdev_ops = &bnxt_netdev_ops;
+	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
+	dev->ethtool_ops = &bnxt_ethtool_ops;
+
+	pci_set_drvdata(pdev, dev);
+
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			   NETIF_F_TSO | NETIF_F_TSO6 |
+			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
+			   NETIF_F_RXHASH |
+			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		dev->hw_features |= NETIF_F_NTUPLE;
+
+	dev->hw_enc_features =
+			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+			NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
+	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+	dev->priv_flags |= IFF_UNICAST_FLT;
+
+#ifdef CONFIG_BNXT_SRIOV
+	init_waitqueue_head(&bp->sriov_cfg_wait);
+#endif
+	rc = bnxt_alloc_hwrm_resources(bp);
+	if (rc)
+		goto init_err;
+
+	mutex_init(&bp->hwrm_cmd_lock);
+	bnxt_hwrm_ver_get(bp);
+
+	rc = bnxt_hwrm_func_drv_rgtr(bp);
+	if (rc)
+		goto init_err;
+
+	/* Get the MAX capabilities for this function */
+	rc = bnxt_hwrm_func_qcaps(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+			   rc);
+		rc = -1;
+		goto init_err;
+	}
+
+	rc = bnxt_hwrm_queue_qportcfg(bp);
+	if (rc) {
+		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
+			   rc);
+		rc = -1;
+		goto init_err;
+	}
+
+	bnxt_set_tpa_flags(bp);
+	bnxt_set_ring_params(bp);
+	dflt_rings = netif_get_num_default_rss_queues();
+	if (BNXT_PF(bp)) {
+		memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+		bp->pf.max_irqs = max_irqs;
+	} else {
+#if defined(CONFIG_BNXT_SRIOV)
+		memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+		bp->vf.max_irqs = max_irqs;
+#endif
+	}
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+	bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+		bp->flags |= BNXT_FLAG_STRIP_VLAN;
+
+	rc = bnxt_probe_phy(bp);
+	if (rc)
+		goto init_err;
+
+	rc = register_netdev(dev);
+	if (rc)
+		goto init_err;
+
+	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
+		    board_info[ent->driver_data].name,
+		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
+
+	return 0;
+
+init_err:
+	pci_iounmap(pdev, bp->bar0);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+init_err_free:
+	free_netdev(dev);
+	return rc;
+}
+
+static struct pci_driver bnxt_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnxt_pci_tbl,
+	.probe		= bnxt_init_one,
+	.remove		= bnxt_remove_one,
+#if defined(CONFIG_BNXT_SRIOV)
+	.sriov_configure = bnxt_sriov_configure,
+#endif
+};
+
+module_pci_driver(bnxt_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
new file mode 100644
index 0000000..4f2267c
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -0,0 +1,1086 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_H
+#define BNXT_H
+
+#define DRV_MODULE_NAME		"bnxt_en"
+#define DRV_MODULE_VERSION	"0.1.24"
+
+#define DRV_VER_MAJ	0
+#define DRV_VER_MIN	1
+#define DRV_VER_UPD	24
+
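+/* Hardware descriptor and completion record layouts.  Multi-byte
+ * fields are little-endian as seen by the chip, hence the
+ * __le32/__le64 annotations.
+ */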
+struct tx_bd {
+	__le32 tx_bd_len_flags_type;
+	#define TX_BD_TYPE					(0x3f << 0)
+	 #define TX_BD_TYPE_SHORT_TX_BD				 (0x00 << 0)
+	 #define TX_BD_TYPE_LONG_TX_BD				 (0x10 << 0)
+	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
+	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
+	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
+	 #define TX_BD_FLAGS_BD_CNT_SHIFT			 8
+	#define TX_BD_FLAGS_LHINT				(3 << 13)
+	 #define TX_BD_FLAGS_LHINT_SHIFT			 13
+	 #define TX_BD_FLAGS_LHINT_512_AND_SMALLER		 (0 << 13)
+	 #define TX_BD_FLAGS_LHINT_512_TO_1023			 (1 << 13)
+	 #define TX_BD_FLAGS_LHINT_1024_TO_2047			 (2 << 13)
+	 #define TX_BD_FLAGS_LHINT_2048_AND_LARGER		 (3 << 13)
+	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
+	#define TX_BD_LEN					(0xffff << 16)
+	 #define TX_BD_LEN_SHIFT				 16
+
+	u32 tx_bd_opaque;
+	__le64 tx_bd_haddr;
+} __packed;
+
+struct tx_bd_ext {
+	__le32 tx_bd_hsize_lflags;
+	#define TX_BD_FLAGS_TCP_UDP_CHKSUM			(1 << 0)
+	#define TX_BD_FLAGS_IP_CKSUM				(1 << 1)
+	#define TX_BD_FLAGS_NO_CRC				(1 << 2)
+	#define TX_BD_FLAGS_STAMP				(1 << 3)
+	#define TX_BD_FLAGS_T_IP_CHKSUM				(1 << 4)
+	#define TX_BD_FLAGS_LSO					(1 << 5)
+	#define TX_BD_FLAGS_IPID_FMT				(1 << 6)
+	#define TX_BD_FLAGS_T_IPID				(1 << 7)
+	#define TX_BD_HSIZE					(0xff << 16)
+	 #define TX_BD_HSIZE_SHIFT				 16
+
+	__le32 tx_bd_mss;
+	__le32 tx_bd_cfa_action;
+	#define TX_BD_CFA_ACTION				(0xffff << 16)
+	 #define TX_BD_CFA_ACTION_SHIFT				 16
+
+	__le32 tx_bd_cfa_meta;
+	#define TX_BD_CFA_META_MASK                             0xfffffff
+	#define TX_BD_CFA_META_VID_MASK                         0xfff
+	#define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
+	 #define TX_BD_CFA_META_PRI_SHIFT                        12
+	#define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
+	 #define TX_BD_CFA_META_TPID_SHIFT                       16
+	#define TX_BD_CFA_META_KEY                              (0xf << 28)
+	 #define TX_BD_CFA_META_KEY_SHIFT			 28
+	#define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
+};
+
+struct rx_bd {
+	__le32 rx_bd_len_flags_type;
+	#define RX_BD_TYPE					(0x3f << 0)
+	 #define RX_BD_TYPE_RX_PACKET_BD			 0x4
+	 #define RX_BD_TYPE_RX_BUFFER_BD			 0x5
+	 #define RX_BD_TYPE_RX_AGG_BD				 0x6
+	 #define RX_BD_TYPE_16B_BD_SIZE				 (0 << 4)
+	 #define RX_BD_TYPE_32B_BD_SIZE				 (1 << 4)
+	 #define RX_BD_TYPE_48B_BD_SIZE				 (2 << 4)
+	 #define RX_BD_TYPE_64B_BD_SIZE				 (3 << 4)
+	#define RX_BD_FLAGS_SOP					(1 << 6)
+	#define RX_BD_FLAGS_EOP					(1 << 7)
+	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
+	 #define RX_BD_FLAGS_1_BUFFER_PACKET			 (0 << 8)
+	 #define RX_BD_FLAGS_2_BUFFER_PACKET			 (1 << 8)
+	 #define RX_BD_FLAGS_3_BUFFER_PACKET			 (2 << 8)
+	 #define RX_BD_FLAGS_4_BUFFER_PACKET			 (3 << 8)
+	#define RX_BD_LEN					(0xffff << 16)
+	 #define RX_BD_LEN_SHIFT				 16
+
+	u32 rx_bd_opaque;
+	__le64 rx_bd_haddr;
+};
+
+struct tx_cmp {
+	__le32 tx_cmp_flags_type;
+	#define CMP_TYPE					(0x3f << 0)
+	 #define CMP_TYPE_TX_L2_CMP				 0
+	 #define CMP_TYPE_RX_L2_CMP				 17
+	 #define CMP_TYPE_RX_AGG_CMP				 18
+	 #define CMP_TYPE_RX_L2_TPA_START_CMP			 19
+	 #define CMP_TYPE_RX_L2_TPA_END_CMP			 21
+	 #define CMP_TYPE_STATUS_CMP				 32
+	 #define CMP_TYPE_REMOTE_DRIVER_REQ			 34
+	 #define CMP_TYPE_REMOTE_DRIVER_RESP			 36
+	 #define CMP_TYPE_ERROR_STATUS				 48
+	 #define CMPL_BASE_TYPE_STAT_EJECT			 (0x1aUL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_DONE			 (0x20UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_FWD_REQ			 (0x22UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_FWD_RESP			 (0x24UL << 0)
+	 #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT		 (0x2eUL << 0)
+
+	#define TX_CMP_FLAGS_ERROR				(1 << 6)
+	#define TX_CMP_FLAGS_PUSH				(1 << 7)
+
+	u32 tx_cmp_opaque;
+	__le32 tx_cmp_errors_v;
+	#define TX_CMP_V					(1 << 0)
+	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		 0
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		 2
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG	 4
+	 #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		 5
+	 #define TX_CMP_ERRORS_ZERO_LENGTH_PKT			 (1 << 4)
+	 #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			 (1 << 5)
+	 #define TX_CMP_ERRORS_DMA_ERROR			 (1 << 6)
+	 #define TX_CMP_ERRORS_HINT_TOO_SHORT			 (1 << 7)
+
+	__le32 tx_cmp_unsed_3;
+};
+
+struct rx_cmp {
+	__le32 rx_cmp_len_flags_type;
+	#define RX_CMP_CMP_TYPE					(0x3f << 0)
+	#define RX_CMP_FLAGS_ERROR				(1 << 6)
+	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
+	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
+	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
+	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
+	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_IP				 (1 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_TCP				 (2 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_UDP				 (3 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_FCOE			 (4 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_ROCE			 (5 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS			 (8 << 12)
+	 #define RX_CMP_FLAGS_ITYPE_PTP_W_TS			 (9 << 12)
+	#define RX_CMP_LEN					(0xffff << 16)
+	 #define RX_CMP_LEN_SHIFT				 16
+
+	u32 rx_cmp_opaque;
+	__le32 rx_cmp_misc_v1;
+	#define RX_CMP_V1					(1 << 0)
+	#define RX_CMP_AGG_BUFS					(0x1f << 1)
+	 #define RX_CMP_AGG_BUFS_SHIFT				 1
+	#define RX_CMP_RSS_HASH_TYPE				(0x7f << 9)
+	 #define RX_CMP_RSS_HASH_TYPE_SHIFT			 9
+	#define RX_CMP_PAYLOAD_OFFSET				(0xff << 16)
+	 #define RX_CMP_PAYLOAD_OFFSET_SHIFT			 16
+
+	__le32 rx_cmp_rss_hash;
+};
+
+#define RX_CMP_HASH_VALID(rxcmp)				\
+	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
+
+#define RX_CMP_HASH_TYPE(rxcmp)					\
+	((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
+	 RX_CMP_RSS_HASH_TYPE_SHIFT)
+
+struct rx_cmp_ext {
+	__le32 rx_cmp_flags2;
+	#define RX_CMP_FLAGS2_IP_CS_CALC			0x1
+	#define RX_CMP_FLAGS2_L4_CS_CALC			(0x1 << 1)
+	#define RX_CMP_FLAGS2_T_IP_CS_CALC			(0x1 << 2)
+	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)
+	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)
+	__le32 rx_cmp_meta_data;
+	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff
+	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000
+	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16
+	__le32 rx_cmp_cfa_code_errors_v2;
+	#define RX_CMP_V					(1 << 0)
+	#define RX_CMPL_ERRORS_MASK				(0x7fff << 1)
+	 #define RX_CMPL_ERRORS_SFT				 1
+	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK		(0x7 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER		 (0x0 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT	 (0x1 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
+	 #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT		 (0x3 << 1)
+	#define RX_CMPL_ERRORS_IP_CS_ERROR			(0x1 << 4)
+	#define RX_CMPL_ERRORS_L4_CS_ERROR			(0x1 << 5)
+	#define RX_CMPL_ERRORS_T_IP_CS_ERROR			(0x1 << 6)
+	#define RX_CMPL_ERRORS_T_L4_CS_ERROR			(0x1 << 7)
+	#define RX_CMPL_ERRORS_CRC_ERROR			(0x1 << 8)
+	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK			(0x7 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR		 (0x0 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION	 (0x1 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN	 (0x2 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR	 (0x3 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR	 (0x4 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR	 (0x5 << 9)
+	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL	 (0x6 << 9)
+	#define RX_CMPL_ERRORS_PKT_ERROR_MASK			(0xf << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR		 (0x0 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION	 (0x1 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN	 (0x2 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL		 (0x3 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR	 (0x4 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR	 (0x5 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN	 (0x6 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
+	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN	 (0x8 << 12)
+
+	#define RX_CMPL_CFA_CODE_MASK				(0xffff << 16)
+	 #define RX_CMPL_CFA_CODE_SFT				 16
+
+	__le32 rx_cmp_unused3;
+};
+
+#define RX_CMP_L2_ERRORS						\
+	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
+
+#define RX_CMP_L4_CS_BITS						\
+	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
+
+#define RX_CMP_L4_CS_ERR_BITS						\
+	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
+
+#define RX_CMP_L4_CS_OK(rxcmp1)						\
+	    (((rxcmp1)->rx_cmp_flags2 &	RX_CMP_L4_CS_BITS) &&		\
+	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
+
+#define RX_CMP_ENCAP(rxcmp1)						\
+	    ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &			\
+	     RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+
+struct rx_agg_cmp {
+	__le32 rx_agg_cmp_len_flags_type;
+	#define RX_AGG_CMP_TYPE					(0x3f << 0)
+	#define RX_AGG_CMP_LEN					(0xffff << 16)
+	 #define RX_AGG_CMP_LEN_SHIFT				 16
+	u32 rx_agg_cmp_opaque;
+	__le32 rx_agg_cmp_v;
+	#define RX_AGG_CMP_V					(1 << 0)
+	__le32 rx_agg_cmp_unused;
+};
+
+struct rx_tpa_start_cmp {
+	__le32 rx_tpa_start_cmp_len_flags_type;
+	#define RX_TPA_START_CMP_TYPE				(0x3f << 0)
+	#define RX_TPA_START_CMP_FLAGS				(0x3ff << 6)
+	 #define RX_TPA_START_CMP_FLAGS_SHIFT			 6
+	#define RX_TPA_START_CMP_FLAGS_PLACEMENT		(0x7 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT		 7
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
+	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS	 (0x6 << 7)
+	#define RX_TPA_START_CMP_FLAGS_RSS_VALID		(0x1 << 10)
+	#define RX_TPA_START_CMP_FLAGS_ITYPES			(0xf << 12)
+	 #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT		 12
+	 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP		 (0x2 << 12)
+	#define RX_TPA_START_CMP_LEN				(0xffff << 16)
+	 #define RX_TPA_START_CMP_LEN_SHIFT			 16
+
+	u32 rx_tpa_start_cmp_opaque;
+	__le32 rx_tpa_start_cmp_misc_v1;
+	#define RX_TPA_START_CMP_V1				(0x1 << 0)
+	#define RX_TPA_START_CMP_RSS_HASH_TYPE			(0x7f << 9)
+	 #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT		 9
+	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
+	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
+
+	__le32 rx_tpa_start_cmp_rss_hash;
+};
+
+#define TPA_START_HASH_VALID(rx_tpa_start)				\
+	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
+	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
+
+#define TPA_START_HASH_TYPE(rx_tpa_start)				\
+	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
+	  RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
+	 RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT)
+
+#define TPA_START_AGG_ID(rx_tpa_start)					\
+	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
+	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+
+struct rx_tpa_start_cmp_ext {
+	__le32 rx_tpa_start_cmp_flags2;
+	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC		(0x1 << 0)
+	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC		(0x1 << 1)
+	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC		(0x1 << 2)
+	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
+
+	__le32 rx_tpa_start_cmp_metadata;
+	__le32 rx_tpa_start_cmp_cfa_code_v2;
+	#define RX_TPA_START_CMP_V2				(0x1 << 0)
+	#define RX_TPA_START_CMP_CFA_CODE			(0xffff << 16)
+	 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT		 16
+	__le32 rx_tpa_start_cmp_unused5;
+};
+
+struct rx_tpa_end_cmp {
+	__le32 rx_tpa_end_cmp_len_flags_type;
+	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
+	#define RX_TPA_END_CMP_FLAGS				(0x3ff << 6)
+	 #define RX_TPA_END_CMP_FLAGS_SHIFT			 6
+	#define RX_TPA_END_CMP_FLAGS_PLACEMENT			(0x7 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT		 7
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
+	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS		 (0x6 << 7)
+	#define RX_TPA_END_CMP_FLAGS_RSS_VALID			(0x1 << 10)
+	#define RX_TPA_END_CMP_FLAGS_ITYPES			(0xf << 12)
+	 #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT		 12
+	 #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP			 (0x2 << 12)
+	#define RX_TPA_END_CMP_LEN				(0xffff << 16)
+	 #define RX_TPA_END_CMP_LEN_SHIFT			 16
+
+	u32 rx_tpa_end_cmp_opaque;
+	__le32 rx_tpa_end_cmp_misc_v1;
+	#define RX_TPA_END_CMP_V1				(0x1 << 0)
+	#define RX_TPA_END_CMP_AGG_BUFS				(0x3f << 1)
+	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT			 1
+	#define RX_TPA_END_CMP_TPA_SEGS				(0xff << 8)
+	 #define RX_TPA_END_CMP_TPA_SEGS_SHIFT			 8
+	#define RX_TPA_END_CMP_PAYLOAD_OFFSET			(0xff << 16)
+	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
+	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
+	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
+
+	__le32 rx_tpa_end_cmp_tsdelta;
+	#define RX_TPA_END_GRO_TS				(0x1 << 31)
+};
+
+#define TPA_END_AGG_ID(rx_tpa_end)					\
+	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
+	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+
+#define TPA_END_TPA_SEGS(rx_tpa_end)					\
+	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
+	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
+
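+/* GRO_JUMBO (0x5) and GRO_HDS (0x6) share one placement bit, so ANDing
+ * the two constants leaves exactly the bit that is set for any GRO
+ * placement and clear for the non-GRO JUMBO/HDS placements.
+ */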
+#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
+	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &		\
+		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
+
+#define TPA_END_GRO(rx_tpa_end)						\
+	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
+	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
+
+#define TPA_END_GRO_TS(rx_tpa_end)					\
+	((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+
+struct rx_tpa_end_cmp_ext {
+	__le32 rx_tpa_end_cmp_dup_acks;
+	#define RX_TPA_END_CMP_TPA_DUP_ACKS			(0xf << 0)
+
+	__le32 rx_tpa_end_cmp_seg_len;
+	#define RX_TPA_END_CMP_TPA_SEG_LEN			(0xffff << 0)
+
+	__le32 rx_tpa_end_cmp_errors_v2;
+	#define RX_TPA_END_CMP_V2				(0x1 << 0)
+	#define RX_TPA_END_CMP_ERRORS				(0x7fff << 1)
+	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
+
+	u32 rx_tpa_end_cmp_start_opaque;
+};
+
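+/* Doorbell layout: bits 0-23 carry the ring index, bit 26 marks the
+ * index valid, bit 27 disables the IRQ, and bits 28-31 select the ring
+ * type being rung.
+ */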
+#define DB_IDX_MASK						0xffffff
+#define DB_IDX_VALID						(0x1 << 26)
+#define DB_IRQ_DIS						(0x1 << 27)
+#define DB_KEY_TX						(0x0 << 28)
+#define DB_KEY_RX						(0x1 << 28)
+#define DB_KEY_CP						(0x2 << 28)
+#define DB_KEY_ST						(0x3 << 28)
+#define DB_KEY_TX_PUSH						(0x4 << 28)
+#define DB_LONG_TX_PUSH						(0x2 << 24)
+
+#define INVALID_HW_RING_ID	((u16)-1)
+
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV4		0x01
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4	0x02
+#define BNXT_RSS_HASH_TYPE_FLAG_IPV6		0x04
+#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6	0x08
+
+/* The hardware supports certain page sizes.  Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT	12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT	PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT	13
+#else
+#define BNXT_PAGE_SHIFT	16
+#endif
+
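+/* With the clamping above, BNXT_PAGE_SHIFT resolves to 12 or 13 (4K or
+ * 8K ring pages) on hosts with page sizes below 64K, and to 16 (64K
+ * ring pages) otherwise.
+ */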
+#define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
+
+#define BNXT_MIN_PKT_SIZE	45
+
+#define BNXT_NUM_TESTS(bp)	0
+
+#define BNXT_DEFAULT_RX_RING_SIZE	1023
+#define BNXT_DEFAULT_TX_RING_SIZE	512
+
+#define MAX_TPA		64
+
+#define MAX_RX_PAGES	8
+#define MAX_RX_AGG_PAGES	32
+#define MAX_TX_PAGES	8
+#define MAX_CP_PAGES	64
+
+#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
+#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
+#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
+
+#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
+#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+
+#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
+
+#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
+#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
+
+#define BNXT_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
+#define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
+#define BNXT_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)
+
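+/* Ring indices are split into a page selector and an offset within the
+ * page.  Each descriptor is 16 bytes, so a page holds
+ * 1 << (BNXT_PAGE_SHIFT - 4) of them, which is why the page number is
+ * recovered by shifting the masked index right by (BNXT_PAGE_SHIFT - 4).
+ */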
+#define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))
+
+#define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))
+
+#define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
+#define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))
+
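+/* The V bit in a completion entry toggles on every pass around the
+ * completion ring.  An entry is new only if its V bit matches the phase
+ * implied by the raw consumer index (bp->cp_bit), which filters out
+ * stale entries from the previous pass.
+ */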
+#define TX_CMP_VALID(txcmp, raw_cons)					\
+	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
+	 !((raw_cons) & bp->cp_bit))
+
+#define RX_CMP_VALID(rxcmp1, raw_cons)					\
+	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
+	 !((raw_cons) & bp->cp_bit))
+
+#define RX_AGG_CMP_VALID(agg, raw_cons)				\
+	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==	\
+	 !((raw_cons) & bp->cp_bit))
+
+#define TX_CMP_TYPE(txcmp)					\
+	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
+
+#define RX_CMP_TYPE(rxcmp)					\
+	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
+
+#define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
+
+#define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
+
+#define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
+
+#define ADV_RAW_CMP(idx, n)	((idx) + (n))
+#define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
+#define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
+#define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
+
+#define HWRM_CMD_TIMEOUT		500
+#define HWRM_RESET_TIMEOUT		((HWRM_CMD_TIMEOUT) * 4)
+#define HWRM_RESP_ERR_CODE_MASK		0xffff
+#define HWRM_RESP_LEN_MASK		0xffff0000
+#define HWRM_RESP_LEN_SFT		16
+#define HWRM_RESP_VALID_MASK		0xff000000
+#define BNXT_HWRM_REQ_MAX_SIZE		128
+#define BNXT_HWRM_REQS_PER_PAGE		(BNXT_PAGE_SIZE /	\
+					 BNXT_HWRM_REQ_MAX_SIZE)
+
+struct bnxt_sw_tx_bd {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	u8			is_gso;
+	u8			is_push;
+	unsigned short		nr_frags;
+};
+
+struct bnxt_sw_rx_bd {
+	u8			*data;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct bnxt_sw_rx_agg_bd {
+	struct page		*page;
+	dma_addr_t		mapping;
+};
+
+struct bnxt_ring_struct {
+	int			nr_pages;
+	int			page_size;
+	void			**pg_arr;
+	dma_addr_t		*dma_arr;
+
+	__le64			*pg_tbl;
+	dma_addr_t		pg_tbl_map;
+
+	int			vmem_size;
+	void			**vmem;
+
+	u16			fw_ring_id; /* Ring id filled by Chimp FW */
+	u8			queue_id;
+};
+
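+/* Header written ahead of a "pushed" packet: for frames short enough to
+ * fit the push buffer, the driver writes the doorbell and both TX BDs
+ * (plus the inline data) through the doorbell BAR instead of having the
+ * chip DMA the descriptors from host memory.
+ */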
+struct tx_push_bd {
+	__le32			doorbell;
+	struct tx_bd		txbd1;
+	struct tx_bd_ext	txbd2;
+};
+
+struct bnxt_tx_ring_info {
+	u16			tx_prod;
+	u16			tx_cons;
+	void __iomem		*tx_doorbell;
+
+	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];
+	struct bnxt_sw_tx_bd	*tx_buf_ring;
+
+	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];
+
+	struct tx_push_bd	*tx_push;
+	dma_addr_t		tx_push_mapping;
+
+#define BNXT_DEV_STATE_CLOSING	0x1
+	u32			dev_state;
+
+	struct bnxt_ring_struct	tx_ring_struct;
+};
+
+struct bnxt_tpa_info {
+	u8			*data;
+	dma_addr_t		mapping;
+	u16			len;
+	unsigned short		gso_type;
+	u32			flags2;
+	u32			metadata;
+	enum pkt_hash_types	hash_type;
+	u32			rss_hash;
+};
+
+struct bnxt_rx_ring_info {
+	u16			rx_prod;
+	u16			rx_agg_prod;
+	u16			rx_sw_agg_prod;
+	void __iomem		*rx_doorbell;
+	void __iomem		*rx_agg_doorbell;
+
+	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];
+	struct bnxt_sw_rx_bd	*rx_buf_ring;
+
+	struct rx_bd		*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
+	struct bnxt_sw_rx_agg_bd	*rx_agg_ring;
+
+	unsigned long		*rx_agg_bmap;
+	u16			rx_agg_bmap_size;
+
+	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
+	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
+
+	struct bnxt_tpa_info	*rx_tpa;
+
+	struct bnxt_ring_struct	rx_ring_struct;
+	struct bnxt_ring_struct	rx_agg_ring_struct;
+};
+
+struct bnxt_cp_ring_info {
+	u32			cp_raw_cons;
+	void __iomem		*cp_doorbell;
+
+	struct tx_cmp		*cp_desc_ring[MAX_CP_PAGES];
+
+	dma_addr_t		cp_desc_mapping[MAX_CP_PAGES];
+
+	struct ctx_hw_stats	*hw_stats;
+	dma_addr_t		hw_stats_map;
+	u32			hw_stats_ctx_id;
+	u64			rx_l4_csum_errors;
+
+	struct bnxt_ring_struct	cp_ring_struct;
+};
+
+struct bnxt_napi {
+	struct napi_struct	napi;
+	struct bnxt		*bp;
+
+	int			index;
+	struct bnxt_cp_ring_info	cp_ring;
+	struct bnxt_rx_ring_info	rx_ring;
+	struct bnxt_tx_ring_info	tx_ring;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	atomic_t		poll_state;
+#endif
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+enum bnxt_poll_state_t {
+	BNXT_STATE_IDLE = 0,
+	BNXT_STATE_NAPI,
+	BNXT_STATE_POLL,
+	BNXT_STATE_DISABLE,
+};
+#endif
+
+struct bnxt_irq {
+	irq_handler_t	handler;
+	unsigned int	vector;
+	u8		requested;
+	char		name[IFNAMSIZ + 2];
+};
+
+#define HWRM_RING_ALLOC_TX	0x1
+#define HWRM_RING_ALLOC_RX	0x2
+#define HWRM_RING_ALLOC_AGG	0x4
+#define HWRM_RING_ALLOC_CMPL	0x8
+
+#define INVALID_STATS_CTX_ID	-1
+
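+/* Common header at the start of every HWRM command: cmpl_ring_req_type
+ * packs the completion ring and request opcode, target_id_seq_id
+ * carries the sequence id that matches responses to requests, and
+ * resp_addr is the DMA address the firmware writes its response to.
+ */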
+struct hwrm_cmd_req_hdr {
+#define HWRM_CMPL_RING_MASK	0xffff0000
+#define HWRM_CMPL_RING_SFT	16
+	__le32	cmpl_ring_req_type;
+#define HWRM_SEQ_ID_MASK	0xffff
+#define HWRM_SEQ_ID_INVALID	-1
+#define HWRM_RESP_LEN_OFFSET	4
+#define HWRM_TARGET_FID_MASK	0xffff0000
+#define HWRM_TARGET_FID_SFT	16
+	__le32	target_id_seq_id;
+	__le64	resp_addr;
+};
+
+struct bnxt_ring_grp_info {
+	u16	fw_stats_ctx;
+	u16	fw_grp_id;
+	u16	rx_fw_ring_id;
+	u16	agg_fw_ring_id;
+	u16	cp_fw_ring_id;
+};
+
+struct bnxt_vnic_info {
+	u16		fw_vnic_id; /* returned by Chimp during alloc */
+	u16		fw_rss_cos_lb_ctx;
+	u16		fw_l2_ctx_id;
+#define BNXT_MAX_UC_ADDRS	4
+	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
+				/* index 0 always dev_addr */
+	u16		uc_filter_count;
+	u8		*uc_list;
+
+	u16		*fw_grp_ids;
+	u16		hash_type;
+	dma_addr_t	rss_table_dma_addr;
+	__le16		*rss_table;
+	dma_addr_t	rss_hash_key_dma_addr;
+	u64		*rss_hash_key;
+	u32		rx_mask;
+
+	u8		*mc_list;
+	int		mc_list_size;
+	int		mc_list_count;
+	dma_addr_t	mc_list_mapping;
+#define BNXT_MAX_MC_ADDRS	16
+
+	u32		flags;
+#define BNXT_VNIC_RSS_FLAG	1
+#define BNXT_VNIC_RFS_FLAG	2
+#define BNXT_VNIC_MCAST_FLAG	4
+#define BNXT_VNIC_UCAST_FLAG	8
+};
+
+#if defined(CONFIG_BNXT_SRIOV)
+struct bnxt_vf_info {
+	u16	fw_fid;
+	u8	mac_addr[ETH_ALEN];
+	u16	max_rsscos_ctxs;
+	u16	max_cp_rings;
+	u16	max_tx_rings;
+	u16	max_rx_rings;
+	u16	max_l2_ctxs;
+	u16	max_irqs;
+	u16	max_vnics;
+	u16	max_stat_ctxs;
+	u16	vlan;
+	u32	flags;
+#define BNXT_VF_QOS		0x1
+#define BNXT_VF_SPOOFCHK	0x2
+#define BNXT_VF_LINK_FORCED	0x4
+#define BNXT_VF_LINK_UP		0x8
+	u32	func_flags; /* func cfg flags */
+	u32	min_tx_rate;
+	u32	max_tx_rate;
+	void	*hwrm_cmd_req_addr;
+	dma_addr_t	hwrm_cmd_req_dma_addr;
+};
+#endif
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID	1
+#define BNXT_FIRST_VF_FID	128
+	u32	fw_fid;
+	u8	port_id;
+	u8	mac_addr[ETH_ALEN];
+	u16	max_rsscos_ctxs;
+	u16	max_cp_rings;
+	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
+	u16	max_pf_tx_rings; /* runtime max tx rings owned by PF */
+	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
+	u16	max_pf_rx_rings; /* runtime max rx rings owned by PF */
+	u16	max_irqs;
+	u16	max_l2_ctxs;
+	u16	max_vnics;
+	u16	max_stat_ctxs;
+	u32	first_vf_id;
+	u16	active_vfs;
+	u16	max_vfs;
+	u32	max_encap_records;
+	u32	max_decap_records;
+	u32	max_tx_em_flows;
+	u32	max_tx_wm_flows;
+	u32	max_rx_em_flows;
+	u32	max_rx_wm_flows;
+	unsigned long	*vf_event_bmap;
+	u16	hwrm_cmd_req_pages;
+	void			*hwrm_cmd_req_addr[4];
+	dma_addr_t		hwrm_cmd_req_dma_addr[4];
+	struct bnxt_vf_info	*vf;
+};
+
+struct bnxt_ntuple_filter {
+	struct hlist_node	hash;
+	u8			src_mac_addr[ETH_ALEN];
+	struct flow_keys	fkeys;
+	__le64			filter_id;
+	u16			sw_id;
+	u16			rxq;
+	u32			flow_id;
+	unsigned long		state;
+#define BNXT_FLTR_VALID		0
+#define BNXT_FLTR_UPDATE	1
+};
+
+#define BNXT_ALL_COPPER_ETHTOOL_SPEED				\
+	(ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |	\
+	 ADVERTISED_10000baseT_Full)
+
+struct bnxt_link_info {
+	u8			media_type;
+	u8			transceiver;
+	u8			phy_addr;
+	u8			phy_link_status;
+#define BNXT_LINK_NO_LINK	PORT_PHY_QCFG_RESP_LINK_NO_LINK
+#define BNXT_LINK_SIGNAL	PORT_PHY_QCFG_RESP_LINK_SIGNAL
+#define BNXT_LINK_LINK		PORT_PHY_QCFG_RESP_LINK_LINK
+	u8			wire_speed;
+	u8			loop_back;
+	u8			link_up;
+	u8			duplex;
+#define BNXT_LINK_DUPLEX_HALF	PORT_PHY_QCFG_RESP_DUPLEX_HALF
+#define BNXT_LINK_DUPLEX_FULL	PORT_PHY_QCFG_RESP_DUPLEX_FULL
+	u8			pause;
+#define BNXT_LINK_PAUSE_TX	PORT_PHY_QCFG_RESP_PAUSE_TX
+#define BNXT_LINK_PAUSE_RX	PORT_PHY_QCFG_RESP_PAUSE_RX
+#define BNXT_LINK_PAUSE_BOTH	(PORT_PHY_QCFG_RESP_PAUSE_RX | \
+				 PORT_PHY_QCFG_RESP_PAUSE_TX)
+	u8			auto_pause_setting;
+	u8			force_pause_setting;
+	u8			duplex_setting;
+	u8			auto_mode;
+#define BNXT_AUTO_MODE(mode)	((mode) > BNXT_LINK_AUTO_NONE && \
+				 (mode) <= BNXT_LINK_AUTO_MSK)
+#define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
+#define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
+#define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
+#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
+#define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define PHY_VER_LEN		3
+	u8			phy_ver[PHY_VER_LEN];
+	u16			link_speed;
+#define BNXT_LINK_SPEED_100MB	PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
+#define BNXT_LINK_SPEED_1GB	PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
+#define BNXT_LINK_SPEED_2GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
+#define BNXT_LINK_SPEED_2_5GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
+#define BNXT_LINK_SPEED_10GB	PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
+#define BNXT_LINK_SPEED_20GB	PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
+#define BNXT_LINK_SPEED_25GB	PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
+#define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
+#define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+	u16			support_speeds;
+	u16			auto_link_speeds;
+#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
+#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
+#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
+#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
+#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
+#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
+#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
+#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
+#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+	u16			auto_link_speed;
+	u16			force_link_speed;
+	u32			preemphasis;
+
+	/* copy of requested setting from ethtool cmd */
+	u8			autoneg;
+#define BNXT_AUTONEG_SPEED		1
+#define BNXT_AUTONEG_FLOW_CTRL		2
+	u8			req_duplex;
+	u8			req_flow_ctrl;
+	u16			req_link_speed;
+	u32			advertising;
+	bool			force_link_chng;
+	/* a copy of phy_qcfg output used to report link
+	 * info to VF
+	 */
+	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+#define BNXT_MAX_QUEUE	8
+
+struct bnxt_queue_info {
+	u8	queue_id;
+	u8	queue_profile;
+};
+
+struct bnxt {
+	void __iomem		*bar0;
+	void __iomem		*bar1;
+	void __iomem		*bar2;
+
+	u32			reg_base;
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	atomic_t		intr_sem;
+
+	u32			flags;
+	#define BNXT_FLAG_DCB_ENABLED	0x1
+	#define BNXT_FLAG_VF		0x2
+	#define BNXT_FLAG_LRO		0x4
+#ifdef CONFIG_INET
+	#define BNXT_FLAG_GRO		0x8
+#else
+	/* Cannot support hardware GRO if CONFIG_INET is not set */
+	#define BNXT_FLAG_GRO		0x0
+#endif
+	#define BNXT_FLAG_TPA		(BNXT_FLAG_LRO | BNXT_FLAG_GRO)
+	#define BNXT_FLAG_JUMBO		0x10
+	#define BNXT_FLAG_STRIP_VLAN	0x20
+	#define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+					 BNXT_FLAG_LRO)
+	#define BNXT_FLAG_USING_MSIX	0x40
+	#define BNXT_FLAG_MSIX_CAP	0x80
+	#define BNXT_FLAG_RFS		0x100
+	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
+					    BNXT_FLAG_RFS |		\
+					    BNXT_FLAG_STRIP_VLAN)
+
+#define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
+
+	struct bnxt_napi	**bnapi;
+
+	u32			rx_buf_size;
+	u32			rx_buf_use_size;	/* usable size */
+	u32			rx_ring_size;
+	u32			rx_agg_ring_size;
+	u32			rx_copy_thresh;
+	u32			rx_ring_mask;
+	u32			rx_agg_ring_mask;
+	int			rx_nr_pages;
+	int			rx_agg_nr_pages;
+	int			rx_nr_rings;
+	int			rsscos_nr_ctxs;
+
+	u32			tx_ring_size;
+	u32			tx_ring_mask;
+	int			tx_nr_pages;
+	int			tx_nr_rings;
+	int			tx_nr_rings_per_tc;
+
+	int			tx_wake_thresh;
+	int			tx_push_thresh;
+	int			tx_push_size;
+
+	u32			cp_ring_size;
+	u32			cp_ring_mask;
+	u32			cp_bit;
+	int			cp_nr_pages;
+	int			cp_nr_rings;
+
+	int			num_stat_ctxs;
+	struct bnxt_ring_grp_info	*grp_info;
+	struct bnxt_vnic_info	*vnic_info;
+	int			nr_vnics;
+
+	u8			max_tc;
+	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
+
+	unsigned int		current_interval;
+#define BNXT_TIMER_INTERVAL	(HZ / 2)
+
+	struct timer_list	timer;
+
+	int			state;
+#define BNXT_STATE_CLOSED	0
+#define BNXT_STATE_OPEN		1
+
+	struct bnxt_irq	*irq_tbl;
+	u8			mac_addr[ETH_ALEN];
+
+	u32			msg_enable;
+
+	u16			hwrm_cmd_seq;
+	u32			hwrm_intr_seq_id;
+	void			*hwrm_cmd_resp_addr;
+	dma_addr_t		hwrm_cmd_resp_dma_addr;
+	void			*hwrm_dbg_resp_addr;
+	dma_addr_t		hwrm_dbg_resp_dma_addr;
+#define HWRM_DBG_REG_BUF_SIZE	128
+	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
+	struct hwrm_ver_get_output	ver_resp;
+#define FW_VER_STR_LEN		32
+#define BC_HWRM_STR_LEN		21
+#define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
+	char			fw_ver_str[FW_VER_STR_LEN];
+	__be16			vxlan_port;
+	u8			vxlan_port_cnt;
+	__le16			vxlan_fw_dst_port_id;
+	u8			nge_port_cnt;
+	__le16			nge_fw_dst_port_id;
+	u16			coal_ticks;
+	u16			coal_ticks_irq;
+	u16			coal_bufs;
+	u16			coal_bufs_irq;
+
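+/* The hardware coalescing timer ticks once every 80 ns, hence the
+ * factor of 25/2 (1000 ns per usec / 80 ns per tick = 12.5) in each
+ * conversion below.
+ */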
+#define BNXT_USEC_TO_COAL_TIMER(x)	((x) * 25 / 2)
+#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25)
+
+	struct work_struct	sp_task;
+	unsigned long		sp_event;
+#define BNXT_RX_MASK_SP_EVENT		0
+#define BNXT_RX_NTP_FLTR_SP_EVENT	1
+#define BNXT_LINK_CHNG_SP_EVENT		2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT	4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT	5
+#define BNXT_RESET_TASK_SP_EVENT	6
+#define BNXT_RST_RING_SP_EVENT		7
+
+	struct bnxt_pf_info	pf;
+#ifdef CONFIG_BNXT_SRIOV
+	int			nr_vfs;
+	struct bnxt_vf_info	vf;
+	wait_queue_head_t	sriov_cfg_wait;
+	bool			sriov_cfg;
+#define BNXT_SRIOV_CFG_WAIT_TMO	msecs_to_jiffies(10000)
+#endif
+
+#define BNXT_NTP_FLTR_MAX_FLTR	4096
+#define BNXT_NTP_FLTR_HASH_SIZE	512
+#define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
+	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
+	spinlock_t		ntp_fltr_lock;	/* for hash table add, del */
+
+	unsigned long		*ntp_fltr_bmap;
+	int			ntp_fltr_count;
+
+	struct bnxt_link_info	link_info;
+};
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
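+/* poll_state implements a small ownership protocol between the NAPI
+ * handler and the busy-poll path: IDLE means the ring is free, NAPI and
+ * POLL record the current owner, and DISABLE parks the ring so neither
+ * side can claim it.  Ownership is taken with atomic_cmpxchg so only
+ * one context can win.
+ */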
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the NAPI poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				BNXT_STATE_NAPI);
+
+	return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+/* called from the busy poll routine to get ownership of a bnapi */
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+	int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				BNXT_STATE_POLL);
+
+	return rc == BNXT_STATE_IDLE;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+	atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+	return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+	int old;
+
+	while (1) {
+		old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
+				     BNXT_STATE_DISABLE);
+		if (old == BNXT_STATE_IDLE)
+			break;
+		usleep_range(500, 5000);
+	}
+}
+
+#else
+
+static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
+{
+	return true;
+}
+
+static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
+{
+	return false;
+}
+
+static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
+{
+}
+
+static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
+{
+	return false;
+}
+
+static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
+{
+}
+
+#endif
+
+void bnxt_set_ring_params(struct bnxt *);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
+int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int hwrm_send_message(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_set_pause(struct bnxt *);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_get_max_rings(struct bnxt *, int *, int *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
new file mode 100644
index 0000000..45bd628
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -0,0 +1,1149 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_ethtool.h"
+#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
+#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
+#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
+
+static u32 bnxt_get_msglevel(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	return bp->msg_enable;
+}
+
+static void bnxt_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	bp->msg_enable = value;
+}
+
+static int bnxt_get_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(*coal));
+
+	coal->rx_coalesce_usecs =
+		max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1);
+	coal->rx_max_coalesced_frames = bp->coal_bufs / 2;
+	coal->rx_coalesce_usecs_irq =
+		max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1);
+	coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2;
+
+	return 0;
+}
+
+static int bnxt_set_coalesce(struct net_device *dev,
+			     struct ethtool_coalesce *coal)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs);
+	bp->coal_bufs = coal->rx_max_coalesced_frames * 2;
+	bp->coal_ticks_irq =
+		BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq);
+	bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2;
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_coal(bp);
+
+	return rc;
+}
+
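+/* One u64 per hardware counter in struct ctx_hw_stats, plus the
+ * software rx_l4_csum_errors counter, per completion ring.
+ */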
+#define BNXT_NUM_STATS	21
+
+static int bnxt_get_sset_count(struct net_device *dev, int sset)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return BNXT_NUM_STATS * bp->cp_nr_rings;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void bnxt_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *buf)
+{
+	u32 i, j = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
+	u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+
+	memset(buf, 0, buf_size);
+
+	if (!bp->bnapi)
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
+		int k;
+
+		for (k = 0; k < stat_fields; j++, k++)
+			buf[j] = le64_to_cpu(hw_stats[k]);
+		buf[j++] = cpr->rx_l4_csum_errors;
+	}
+}
+
+static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u32 i;
+
+	switch (stringset) {
+	/* The number of strings must match BNXT_NUM_STATS defined above. */
+	case ETH_SS_STATS:
+		for (i = 0; i < bp->cp_nr_rings; i++) {
+			sprintf(buf, "[%d]: rx_ucast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_mcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_bcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_discards", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_drops", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_ucast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_mcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_bcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_ucast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_mcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_bcast_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_discards", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_drops", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_ucast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_mcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tx_bcast_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_packets", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_bytes", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_events", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: tpa_aborts", i);
+			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+			buf += ETH_GSTRING_LEN;
+		}
+		break;
+	default:
+		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
+			   stringset);
+		break;
+	}
+}
+
+static void bnxt_get_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
+	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnxt_set_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
+	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS))
+		return -EINVAL;
+
+	if (netif_running(dev))
+		bnxt_close_nic(bp, false, false);
+
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+	bnxt_set_ring_params(bp);
+
+	if (netif_running(dev))
+		return bnxt_open_nic(bp, false, false);
+
+	return 0;
+}
+
+static void bnxt_get_channels(struct net_device *dev,
+			      struct ethtool_channels *channel)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int max_rx_rings, max_tx_rings, tcs;
+
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	tcs = netdev_get_num_tc(dev);
+	if (tcs > 1)
+		max_tx_rings /= tcs;
+
+	channel->max_rx = max_rx_rings;
+	channel->max_tx = max_tx_rings;
+	channel->max_other = 0;
+	channel->max_combined = 0;
+	channel->rx_count = bp->rx_nr_rings;
+	channel->tx_count = bp->tx_nr_rings_per_tc;
+}
+
+static int bnxt_set_channels(struct net_device *dev,
+			     struct ethtool_channels *channel)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int max_rx_rings, max_tx_rings, tcs;
+	u32 rc = 0;
+
+	if (channel->other_count || channel->combined_count ||
+	    !channel->rx_count || !channel->tx_count)
+		return -EINVAL;
+
+	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
+	tcs = netdev_get_num_tc(dev);
+	if (tcs > 1)
+		max_tx_rings /= tcs;
+
+	if (channel->rx_count > max_rx_rings ||
+	    channel->tx_count > max_tx_rings)
+		return -EINVAL;
+
+	if (netif_running(dev)) {
+		if (BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * before PF unload
+			 */
+		}
+		rc = bnxt_close_nic(bp, true, false);
+		if (rc) {
+			netdev_err(bp->dev, "Set channel failure rc :%x\n",
+				   rc);
+			return rc;
+		}
+	}
+
+	bp->rx_nr_rings = channel->rx_count;
+	bp->tx_nr_rings_per_tc = channel->tx_count;
+	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+	if (tcs > 1)
+		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+	bp->num_stat_ctxs = bp->cp_nr_rings;
+
+	if (netif_running(dev)) {
+		rc = bnxt_open_nic(bp, true, false);
+		if ((!rc) && BNXT_PF(bp)) {
+			/* TODO CHIMP_FW: Send message to all VFs
+			 * to re-enable
+			 */
+		}
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
+			    u32 *rule_locs)
+{
+	int i, j = 0;
+
+	cmd->data = bp->ntp_fltr_count;
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+		struct bnxt_ntuple_filter *fltr;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(fltr, head, hash) {
+			if (j == cmd->rule_cnt)
+				break;
+			rule_locs[j++] = fltr->sw_id;
+		}
+		rcu_read_unlock();
+		if (j == cmd->rule_cnt)
+			break;
+	}
+	cmd->rule_cnt = j;
+	return 0;
+}
+
+static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
+{
+	struct ethtool_rx_flow_spec *fs =
+		(struct ethtool_rx_flow_spec *)&cmd->fs;
+	struct bnxt_ntuple_filter *fltr;
+	struct flow_keys *fkeys;
+	int i, rc = -EINVAL;
+
+	if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+		return rc;
+
+	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
+		struct hlist_head *head;
+
+		head = &bp->ntp_fltr_hash_tbl[i];
+		rcu_read_lock();
+		hlist_for_each_entry_rcu(fltr, head, hash) {
+			if (fltr->sw_id == fs->location)
+				goto fltr_found;
+		}
+		rcu_read_unlock();
+	}
+	return rc;
+
+fltr_found:
+	fkeys = &fltr->fkeys;
+	if (fkeys->basic.ip_proto == IPPROTO_TCP)
+		fs->flow_type = TCP_V4_FLOW;
+	else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+		fs->flow_type = UDP_V4_FLOW;
+	else
+		goto fltr_err;
+
+	fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+	fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+	fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+	fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+	fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+	fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+	fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+	fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+
+	fs->ring_cookie = fltr->rxq;
+	rc = 0;
+
+fltr_err:
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+			  u32 *rule_locs)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (cmd->cmd) {
+	case ETHTOOL_GRXRINGS:
+		cmd->data = bp->rx_nr_rings;
+		break;
+
+	case ETHTOOL_GRXCLSRLCNT:
+		cmd->rule_cnt = bp->ntp_fltr_count;
+		cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
+		break;
+
+	case ETHTOOL_GRXCLSRLALL:
+		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
+		break;
+
+	case ETHTOOL_GRXCLSRULE:
+		rc = bnxt_grxclsrule(bp, cmd);
+		break;
+
+	default:
+		rc = -EOPNOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+#endif
+
+static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+{
+	return HW_HASH_INDEX_SIZE;
+}
+
+static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
+{
+	return HW_HASH_KEY_SIZE;
+}
+
+static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+			 u8 *hfunc)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+	int i = 0;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+
+	if (indir)
+		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+			indir[i] = le16_to_cpu(vnic->rss_table[i]);
+
+	if (key)
+		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+	return 0;
+}
+
+static void bnxt_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+	info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+	info->testinfo_len = BNXT_NUM_TESTS(bp);
+	/* TODO CHIMP_FW: eeprom dump details */
+	info->eedump_len = 0;
+	/* TODO CHIMP FW: reg dump details */
+	info->regdump_len = 0;
+}
+
+static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+{
+	u16 fw_speeds = link_info->support_speeds;
+	u32 speed_mask = 0;
+
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+		speed_mask |= SUPPORTED_100baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+		speed_mask |= SUPPORTED_1000baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+		speed_mask |= SUPPORTED_2500baseX_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+		speed_mask |= SUPPORTED_10000baseT_Full;
+	/* TODO: support 25GB, 50GB with different cable type */
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+		speed_mask |= SUPPORTED_20000baseMLD2_Full |
+			SUPPORTED_20000baseKR2_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+		speed_mask |= SUPPORTED_40000baseKR4_Full |
+			SUPPORTED_40000baseCR4_Full |
+			SUPPORTED_40000baseSR4_Full |
+			SUPPORTED_40000baseLR4_Full;
+
+	return speed_mask;
+}
+
+static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+{
+	u16 fw_speeds = link_info->auto_link_speeds;
+	u32 speed_mask = 0;
+
+	/* TODO: support 25GB, 40GB, 50GB with different cable type */
+	/* set the advertised speeds */
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
+		speed_mask |= ADVERTISED_100baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
+		speed_mask |= ADVERTISED_1000baseT_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
+		speed_mask |= ADVERTISED_2500baseX_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
+		speed_mask |= ADVERTISED_10000baseT_Full;
+	/* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
+		speed_mask |= ADVERTISED_20000baseMLD2_Full |
+			      ADVERTISED_20000baseKR2_Full;
+	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
+		speed_mask |= ADVERTISED_40000baseKR4_Full |
+			      ADVERTISED_40000baseCR4_Full |
+			      ADVERTISED_40000baseSR4_Full |
+			      ADVERTISED_40000baseLR4_Full;
+	return speed_mask;
+}
+
+u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+	switch (fw_link_speed) {
+	case BNXT_LINK_SPEED_100MB:
+		return SPEED_100;
+	case BNXT_LINK_SPEED_1GB:
+		return SPEED_1000;
+	case BNXT_LINK_SPEED_2_5GB:
+		return SPEED_2500;
+	case BNXT_LINK_SPEED_10GB:
+		return SPEED_10000;
+	case BNXT_LINK_SPEED_20GB:
+		return SPEED_20000;
+	case BNXT_LINK_SPEED_25GB:
+		return SPEED_25000;
+	case BNXT_LINK_SPEED_40GB:
+		return SPEED_40000;
+	case BNXT_LINK_SPEED_50GB:
+		return SPEED_50000;
+	default:
+		return SPEED_UNKNOWN;
+	}
+}
+
+static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u16 ethtool_speed;
+
+	cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+
+	if (link_info->auto_link_speeds)
+		cmd->supported |= SUPPORTED_Autoneg;
+
+	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+		cmd->advertising =
+			bnxt_fw_to_ethtool_advertised_spds(link_info);
+		cmd->advertising |= ADVERTISED_Autoneg;
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+		cmd->advertising = 0;
+	}
+	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+		    BNXT_LINK_PAUSE_BOTH) {
+			cmd->advertising |= ADVERTISED_Pause;
+			cmd->supported |= SUPPORTED_Pause;
+		} else {
+			cmd->advertising |= ADVERTISED_Asym_Pause;
+			cmd->supported |= SUPPORTED_Asym_Pause;
+			if (link_info->auto_pause_setting &
+			    BNXT_LINK_PAUSE_RX)
+				cmd->advertising |= ADVERTISED_Pause;
+		}
+	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+		if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
+		    BNXT_LINK_PAUSE_BOTH) {
+			cmd->supported |= SUPPORTED_Pause;
+		} else {
+			cmd->supported |= SUPPORTED_Asym_Pause;
+			if (link_info->force_pause_setting &
+			    BNXT_LINK_PAUSE_RX)
+				cmd->supported |= SUPPORTED_Pause;
+		}
+	}
+
+	cmd->port = PORT_NONE;
+	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+		cmd->port = PORT_TP;
+		cmd->supported |= SUPPORTED_TP;
+		cmd->advertising |= ADVERTISED_TP;
+	} else {
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->advertising |= ADVERTISED_FIBRE;
+
+		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+			cmd->port = PORT_DA;
+		else if (link_info->media_type ==
+			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+			cmd->port = PORT_FIBRE;
+	}
+
+	if (link_info->phy_link_status == BNXT_LINK_LINK) {
+		if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+			cmd->duplex = DUPLEX_FULL;
+	} else {
+		cmd->duplex = DUPLEX_UNKNOWN;
+	}
+	ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+	ethtool_cmd_speed_set(cmd, ethtool_speed);
+	if (link_info->transceiver ==
+		PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+		cmd->transceiver = XCVR_INTERNAL;
+	else
+		cmd->transceiver = XCVR_EXTERNAL;
+	cmd->phy_address = link_info->phy_addr;
+
+	return 0;
+}
+
+static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+{
+	switch (ethtool_speed) {
+	case SPEED_100:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+	case SPEED_1000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+	case SPEED_2500:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+	case SPEED_10000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+	case SPEED_20000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+	case SPEED_25000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+	case SPEED_40000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+	case SPEED_50000:
+		return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+	default:
+		netdev_err(dev, "unsupported speed!\n");
+		break;
+	}
+	return 0;
+}
+
+static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+{
+	u16 fw_speed_mask = 0;
+
+	/* only support autoneg at speed 100, 1000, and 10000 */
+	if (advertising & (ADVERTISED_100baseT_Full |
+			   ADVERTISED_100baseT_Half)) {
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
+	}
+	if (advertising & (ADVERTISED_1000baseT_Full |
+			   ADVERTISED_1000baseT_Half)) {
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
+	}
+	if (advertising & ADVERTISED_10000baseT_Full)
+		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+
+	return fw_speed_mask;
+}
+
+static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	int rc = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+	u32 speed, fw_advertising = 0;
+	bool set_pause = false;
+
+	if (BNXT_VF(bp))
+		return rc;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+			netdev_err(dev, "Media type doesn't support autoneg\n");
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED |
+					 ADVERTISED_Autoneg |
+					 ADVERTISED_TP |
+					 ADVERTISED_Pause |
+					 ADVERTISED_Asym_Pause)) {
+			netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
+				   cmd->advertising);
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
+		if (fw_advertising & ~link_info->support_speeds) {
+			netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
+				   cmd->advertising);
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		link_info->autoneg |= BNXT_AUTONEG_SPEED;
+		if (!fw_advertising)
+			link_info->advertising = link_info->support_speeds;
+		else
+			link_info->advertising = fw_advertising;
+		/* Any change to autoneg causes a link change, so the driver
+		 * should restore the original pause setting when autoneg is
+		 * enabled.
+		 */
+		set_pause = true;
+	} else {
+		/* TODO: currently don't support half duplex */
+		if (cmd->duplex == DUPLEX_HALF) {
+			netdev_err(dev, "HALF DUPLEX is not supported!\n");
+			rc = -EINVAL;
+			goto set_setting_exit;
+		}
+		/* If we received a request for an unknown duplex, assume full */
+		if (cmd->duplex == DUPLEX_UNKNOWN)
+			cmd->duplex = DUPLEX_FULL;
+		speed = ethtool_cmd_speed(cmd);
+		link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+		link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+		link_info->advertising = 0;
+	}
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+
+set_setting_exit:
+	return rc;
+}
+
+static void bnxt_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	if (BNXT_VF(bp))
+		return;
+	epause->autoneg = !!(link_info->auto_pause_setting &
+			     BNXT_LINK_PAUSE_BOTH);
+	epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
+	epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
+}
+
+static int bnxt_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *epause)
+{
+	int rc = 0;
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_link_info *link_info = &bp->link_info;
+
+	if (BNXT_VF(bp))
+		return rc;
+
+	if (epause->autoneg) {
+		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+	} else {
+		/* when transition from auto pause to force pause,
+		 * force a link change
+		 */
+		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+			link_info->force_link_chng = true;
+		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+	}
+	if (epause->rx_pause)
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
+	else
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
+
+	if (epause->tx_pause)
+		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
+	else
+		link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
+
+	if (netif_running(dev))
+		rc = bnxt_hwrm_set_pause(bp);
+	return rc;
+}
+
+static u32 bnxt_get_link(struct net_device *dev)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	/* TODO: handle MF, VF, driver close case */
+	return bp->link_info.link_up;
+}
+
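+/* NVRAM writes go through the HWRM firmware interface: the payload is
+ * copied into a DMA-coherent bounce buffer and its bus address handed
+ * to the firmware in host_src_addr.
+ */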
+static int bnxt_flash_nvram(struct net_device *dev,
+			    u16 dir_type,
+			    u16 dir_ordinal,
+			    u16 dir_ext,
+			    u16 dir_attr,
+			    const u8 *data,
+			    size_t data_len)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	struct hwrm_nvm_write_input req = {0};
+	dma_addr_t dma_handle;
+	u8 *kmem;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);
+
+	req.dir_type = cpu_to_le16(dir_type);
+	req.dir_ordinal = cpu_to_le16(dir_ordinal);
+	req.dir_ext = cpu_to_le16(dir_ext);
+	req.dir_attr = cpu_to_le16(dir_attr);
+	req.dir_data_length = cpu_to_le32(data_len);
+
+	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
+				  GFP_KERNEL);
+	if (!kmem) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)data_len);
+		return -ENOMEM;
+	}
+	memcpy(kmem, data, data_len);
+	req.host_src_addr = cpu_to_le64(dma_handle);
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
+	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+
+	return rc;
+}
+
+static int bnxt_flash_firmware(struct net_device *dev,
+			       u16 dir_type,
+			       const u8 *fw_data,
+			       size_t fw_size)
+{
+	int	rc = 0;
+	u16	code_type;
+	u32	stored_crc;
+	u32	calculated_crc;
+	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
+
+	switch (dir_type) {
+	case BNX_DIR_TYPE_BOOTCODE:
+	case BNX_DIR_TYPE_BOOTCODE_2:
+		code_type = CODE_BOOT;
+		break;
+	default:
+		netdev_err(dev, "Unsupported directory entry type: %u\n",
+			   dir_type);
+		return -EINVAL;
+	}
+	if (fw_size < sizeof(struct bnxt_fw_header)) {
+		netdev_err(dev, "Invalid firmware file size: %u\n",
+			   (unsigned int)fw_size);
+		return -EINVAL;
+	}
+	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
+		netdev_err(dev, "Invalid firmware signature: %08X\n",
+			   le32_to_cpu(header->signature));
+		return -EINVAL;
+	}
+	if (header->code_type != code_type) {
+		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
+			   code_type, header->code_type);
+		return -EINVAL;
+	}
+	if (header->device != DEVICE_CUMULUS_FAMILY) {
+		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
+			   DEVICE_CUMULUS_FAMILY, header->device);
+		return -EINVAL;
+	}
+	/* Confirm the CRC32 checksum of the file: */
+	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+					     sizeof(stored_crc)));
+	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+	if (calculated_crc != stored_crc) {
+		netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
+			   (unsigned long)stored_crc,
+			   (unsigned long)calculated_crc);
+		return -EINVAL;
+	}
+	/* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
+	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+			      0, 0, fw_data, fw_size);
+	if (rc == 0) {	/* Firmware update successful */
+		/* TODO: Notify processor it needs to reset itself
+		 */
+	}
+	return rc;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_CHIMP_PATCH:
+	case BNX_DIR_TYPE_BOOTCODE:
+	case BNX_DIR_TYPE_BOOTCODE_2:
+	case BNX_DIR_TYPE_APE_FW:
+	case BNX_DIR_TYPE_APE_PATCH:
+	case BNX_DIR_TYPE_KONG_FW:
+	case BNX_DIR_TYPE_KONG_PATCH:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_AVS:
+	case BNX_DIR_TYPE_EXP_ROM_MBA:
+	case BNX_DIR_TYPE_PCIE:
+	case BNX_DIR_TYPE_TSCF_UCODE:
+	case BNX_DIR_TYPE_EXT_PHY:
+	case BNX_DIR_TYPE_CCM:
+	case BNX_DIR_TYPE_ISCSI_BOOT:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_executable(u16 dir_type)
+{
+	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+		bnxt_dir_type_is_unprotected_exec_format(dir_type);
+}
+
+static int bnxt_flash_firmware_from_file(struct net_device *dev,
+					 u16 dir_type,
+					 const char *filename)
+{
+	const struct firmware  *fw;
+	int			rc;
+
+	if (!bnxt_dir_type_is_executable(dir_type))
+		return -EINVAL;
+
+	rc = request_firmware(&fw, filename, &dev->dev);
+	if (rc != 0) {
+		netdev_err(dev, "Error %d requesting firmware file: %s\n",
+			   rc, filename);
+		return rc;
+	}
+	if (bnxt_dir_type_is_ape_bin_format(dir_type))
+		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+	else
+		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+				      0, 0, fw->data, fw->size);
+	release_firmware(fw);
+	return rc;
+}
+
+static int bnxt_flash_package_from_file(struct net_device *dev,
+					char *filename)
+{
+	netdev_err(dev, "packages are not yet supported\n");
+	return -EINVAL;
+}
+
+static int bnxt_flash_device(struct net_device *dev,
+			     struct ethtool_flash *flash)
+{
+	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
+		netdev_err(dev, "flashdev not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
+	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
+		return bnxt_flash_package_from_file(dev, flash->data);
+
+	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
+}
+
+static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	struct hwrm_nvm_get_dir_info_input req = {0};
+	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		*entries = le32_to_cpu(output->entries);
+		*length = le32_to_cpu(output->entry_length);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static int bnxt_get_eeprom_len(struct net_device *dev)
+{
+	/* The -1 return value allows the entire 32-bit range of offsets to be
+	 * passed via the ethtool command-line utility.
+	 */
+	return -1;
+}
+
+static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	u32 dir_entries;
+	u32 entry_length;
+	u8 *buf;
+	size_t buflen;
+	dma_addr_t dma_handle;
+	struct hwrm_nvm_get_dir_entries_input req = {0};
+
+	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
+	if (rc != 0)
+		return rc;
+
+	/* Insert 2 bytes of directory info (count and size of entries) */
+	if (len < 2)
+		return -EINVAL;
+
+	*data++ = dir_entries;
+	*data++ = entry_length;
+	len -= 2;
+	memset(data, 0xff, len);
+
+	buflen = dir_entries * entry_length;
+	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
+				 GFP_KERNEL);
+	if (!buf) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)buflen);
+		return -ENOMEM;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
+	req.host_dest_addr = cpu_to_le64(dma_handle);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc == 0)
+		memcpy(data, buf, len > buflen ? buflen : len);
+	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
+	return rc;
+}
+
+static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+			       u32 length, u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc;
+	u8 *buf;
+	dma_addr_t dma_handle;
+	struct hwrm_nvm_read_input req = {0};
+
+	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
+				 GFP_KERNEL);
+	if (!buf) {
+		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
+			   (unsigned)length);
+		return -ENOMEM;
+	}
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
+	req.host_dest_addr = cpu_to_le64(dma_handle);
+	req.dir_idx = cpu_to_le16(index);
+	req.offset = cpu_to_le32(offset);
+	req.len = cpu_to_le32(length);
+
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (rc == 0)
+		memcpy(data, buf, length);
+	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
+	return rc;
+}
+
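+/* The 32-bit eeprom offset is overloaded: offset 0 returns the NVRAM
+ * directory itself; otherwise the top 8 bits select a directory entry
+ * (1-based) and the low 24 bits give the byte offset within it.
+ */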
+static int bnxt_get_eeprom(struct net_device *dev,
+			   struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	u32 index;
+	u32 offset;
+
+	if (eeprom->offset == 0) /* special offset value to get directory */
+		return bnxt_get_nvram_directory(dev, eeprom->len, data);
+
+	index = eeprom->offset >> 24;
+	offset = eeprom->offset & 0xffffff;
+
+	if (index == 0) {
+		netdev_err(dev, "unsupported index value: %d\n", index);
+		return -EINVAL;
+	}
+
+	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
+}
+
+static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_nvm_erase_dir_entry_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
+	req.dir_idx = cpu_to_le16(index);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
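+/* eeprom->magic encodes the request: the high 16 bits give the NVM item
+ * type, with the special value 0xffff selecting a directory operation
+ * whose opcode and 1-based entry index live in the next two bytes.  For
+ * erase (opcode 0x0e) the offset must also be the bitwise complement of
+ * magic, as a safety interlock.
+ */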
+static int bnxt_set_eeprom(struct net_device *dev,
+			   struct ethtool_eeprom *eeprom,
+			   u8 *data)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	u8 index, dir_op;
+	u16 type, ext, ordinal, attr;
+
+	if (!BNXT_PF(bp)) {
+		netdev_err(dev, "NVM write not supported from a virtual function\n");
+		return -EINVAL;
+	}
+
+	type = eeprom->magic >> 16;
+
+	if (type == 0xffff) { /* special value for directory operations */
+		index = eeprom->magic & 0xff;
+		dir_op = eeprom->magic >> 8;
+		if (index == 0)
+			return -EINVAL;
+		switch (dir_op) {
+		case 0x0e: /* erase */
+			if (eeprom->offset != ~eeprom->magic)
+				return -EINVAL;
+			return bnxt_erase_nvram_directory(dev, index - 1);
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type))
+		return -EINVAL;
+	ext = eeprom->magic & 0xffff;
+	ordinal = eeprom->offset >> 16;
+	attr = eeprom->offset & 0xffff;
+
+	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
+				eeprom->len);
+}
+
+const struct ethtool_ops bnxt_ethtool_ops = {
+	.get_settings		= bnxt_get_settings,
+	.set_settings		= bnxt_set_settings,
+	.get_pauseparam		= bnxt_get_pauseparam,
+	.set_pauseparam		= bnxt_set_pauseparam,
+	.get_drvinfo		= bnxt_get_drvinfo,
+	.get_coalesce		= bnxt_get_coalesce,
+	.set_coalesce		= bnxt_set_coalesce,
+	.get_msglevel		= bnxt_get_msglevel,
+	.set_msglevel		= bnxt_set_msglevel,
+	.get_sset_count		= bnxt_get_sset_count,
+	.get_strings		= bnxt_get_strings,
+	.get_ethtool_stats	= bnxt_get_ethtool_stats,
+	.set_ringparam		= bnxt_set_ringparam,
+	.get_ringparam		= bnxt_get_ringparam,
+	.get_channels		= bnxt_get_channels,
+	.set_channels		= bnxt_set_channels,
+#ifdef CONFIG_RFS_ACCEL
+	.get_rxnfc		= bnxt_get_rxnfc,
+#endif
+	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
+	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
+	.get_rxfh               = bnxt_get_rxfh,
+	.flash_device		= bnxt_flash_device,
+	.get_eeprom_len         = bnxt_get_eeprom_len,
+	.get_eeprom             = bnxt_get_eeprom,
+	.set_eeprom		= bnxt_set_eeprom,
+	.get_link		= bnxt_get_link,
+};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
new file mode 100644
index 0000000..98fa81e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -0,0 +1,17 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_ETHTOOL_H
+#define BNXT_ETHTOOL_H
+
+extern const struct ethtool_ops bnxt_ethtool_ops;
+
+u32 bnxt_fw_to_ethtool_speed(u16);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
new file mode 100644
index 0000000..e0aac65
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -0,0 +1,104 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __BNXT_FW_HDR_H__
+#define __BNXT_FW_HDR_H__
+
+#define BNXT_FIRMWARE_BIN_SIGNATURE     0x1a4d4342	/* "BCM"+0x1a */
+
+enum SUPPORTED_FAMILY {
+	DEVICE_5702_3_4_FAMILY,		/* 0  - Denali, Vinson, K2 */
+	DEVICE_5705_FAMILY,		/* 1  - Bachelor */
+	DEVICE_SHASTA_FAMILY,		/* 2  - 5751 */
+	DEVICE_5706_FAMILY,		/* 3  - Teton */
+	DEVICE_5714_FAMILY,		/* 4  - Hamilton */
+	DEVICE_STANFORD_FAMILY,		/* 5  - 5755 */
+	DEVICE_STANFORD_ME_FAMILY,	/* 6  - 5756 */
+	DEVICE_SOLEDAD_FAMILY,		/* 7  - 5761[E] */
+	DEVICE_CILAI_FAMILY,		/* 8  - 57780/60/90/91 */
+	DEVICE_ASPEN_FAMILY,		/* 9  - 57781/85/61/65/91/95 */
+	DEVICE_ASPEN_PLUS_FAMILY,	/* 10 - 57786 */
+	DEVICE_LOGAN_FAMILY,		/* 11 - Any device in the Logan family
+					 */
+	DEVICE_LOGAN_5762,		/* 12 - Logan Enterprise (aka Columbia)
+					 */
+	DEVICE_LOGAN_57767,		/* 13 - Logan Client */
+	DEVICE_LOGAN_57787,		/* 14 - Logan Consumer */
+	DEVICE_LOGAN_5725,		/* 15 - Logan Server (TruManage-enabled)
+					 */
+	DEVICE_SAWTOOTH_FAMILY,		/* 16 - 5717/18 */
+	DEVICE_COTOPAXI_FAMILY,		/* 17 - 5719 */
+	DEVICE_SNAGGLETOOTH_FAMILY,	/* 18 - 5720 */
+	DEVICE_CUMULUS_FAMILY,		/* 19 - Cumulus/Whitney */
+	MAX_DEVICE_FAMILY
+};
+
+enum SUPPORTED_CODE {
+	CODE_ASF1,		/* 0  - ASF VERSION 1.03 <deprecated> */
+	CODE_ASF2,		/* 1  - ASF VERSION 2.00 <deprecated> */
+	CODE_PASSTHRU,		/* 2  - PassThru         <deprecated> */
+	CODE_PT_SEC,		/* 3  - PassThru with security <deprecated> */
+	CODE_UMP,		/* 4  - UMP                     <deprecated> */
+	CODE_BOOT,		/* 5  - Bootcode */
+	CODE_DASH,		/* 6  - TruManage (DASH + ASF + PMCI)
+				 *	Management firmware
+				 */
+	CODE_MCTP_PASSTHRU,	/* 7  - NCSI / MCTP Pass-through firmware */
+	CODE_PM_OFFLOAD,	/* 8  - Power-Management Proxy Offload firmware
+				 */
+	CODE_MDNS_SD_OFFLOAD,	/* 9  - Multicast DNS Service Discovery Proxy
+				 *	Offload firmware
+				 */
+	CODE_DISC_OFFLOAD,	/* 10 - Discovery Offload firmware */
+	CODE_MUSTANG,		/* 11 - I2C Error reporting APE firmware
+				 *	<deprecated>
+				 */
+	CODE_ARP_BATCH,		/* 12 - ARP Batch firmware */
+	CODE_SMASH,		/* 13 - TruManage (SMASH + DCMI/IPMI + PMCI)
+				 *	Management firmware
+				 */
+	CODE_APE_DIAG,		/* 14 - APE Test Diag firmware */
+	CODE_APE_PATCH,		/* 15 - APE Patch firmware */
+	CODE_TANG_PATCH,	/* 16 - TANG Patch firmware */
+	CODE_KONG_FW,		/* 17 - KONG firmware */
+	CODE_KONG_PATCH,	/* 18 - KONG Patch firmware */
+	CODE_BONO_FW,		/* 19 - BONO firmware */
+	CODE_BONO_PATCH,	/* 20 - BONO Patch firmware */
+
+	MAX_CODE_TYPE,
+};
+
+enum SUPPORTED_MEDIA {
+	MEDIA_COPPER,		/* 0 */
+	MEDIA_FIBER,		/* 1 */
+	MEDIA_NONE,		/* 2 */
+	MEDIA_COPPER_FIBER,	/* 3 */
+	MAX_MEDIA_TYPE,
+};
+
+struct bnxt_fw_header {
+	__le32 signature;	/* contains the constant value of
+				 * BNXT_FIRMWARE_BIN_SIGNATURE
+				 */
+	u8 flags;		/* reserved for ChiMP use */
+	u8 code_type;		/* enum SUPPORTED_CODE */
+	u8 device;		/* enum SUPPORTED_FAMILY */
+	u8 media;		/* enum SUPPORTED_MEDIA */
+	u8 version[16];		/* the null-terminated version string to
+				 * indicate the version of the file; this
+				 * will be copied from the binary file
+				 * version string
+				 */
+	u8 build;
+	u8 revision;
+	u8 minor_ver;
+	u8 major_ver;
+};
+
+#endif
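
The header above fixes the layout of a firmware image; a flashing path can
sanity-check an image against it before writing anything to NVM. A minimal
sketch, assuming kernel context (le32_to_cpu) and a hypothetical helper
name:

	#include <linux/types.h>
	#include <linux/string.h>

	static bool bnxt_fw_image_plausible(const void *fw, size_t len)
	{
		struct bnxt_fw_header hdr;

		if (len < sizeof(hdr))
			return false;
		memcpy(&hdr, fw, sizeof(hdr));	/* avoid unaligned access */
		if (le32_to_cpu(hdr.signature) != BNXT_FIRMWARE_BIN_SIGNATURE)
			return false;
		return hdr.code_type < MAX_CODE_TYPE &&
		       hdr.device < MAX_DEVICE_FAMILY &&
		       hdr.media < MAX_MEDIA_TYPE;
	}
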
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
new file mode 100644
index 0000000..70fc825
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -0,0 +1,4046 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HSI_H
+#define BNXT_HSI_H
+
+/* per-context HW statistics -- chip view */
+struct ctx_hw_stats  {
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_discard_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_discard_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 tpa_pkts;
+	__le64 tpa_bytes;
+	__le64 tpa_events;
+	__le64 tpa_aborts;
+};
+
+/* Statistics Ejection Buffer Completion Record (16 bytes) */
+struct eject_cmpl {
+	__le16 type;
+	#define EJECT_CMPL_TYPE_MASK				    0x3fUL
+	#define EJECT_CMPL_TYPE_SFT				    0
+	#define EJECT_CMPL_TYPE_STAT_EJECT			   (0x1aUL << 0)
+	__le16 len;
+	__le32 opaque;
+	__le32 v;
+	#define EJECT_CMPL_V					    0x1UL
+	__le32 unused_2;
+};
+
+/* HWRM Completion Record (16 bytes) */
+struct hwrm_cmpl {
+	__le16 type;
+	#define HWRM_CMPL_TYPE_MASK				    0x3fUL
+	#define HWRM_CMPL_TYPE_SFT				    0
+	#define HWRM_CMPL_TYPE_HWRM_DONE			   (0x20UL << 0)
+	__le16 sequence_id;
+	__le32 unused_1;
+	__le32 v;
+	#define HWRM_CMPL_V					    0x1UL
+	__le32 unused_3;
+};
+
+/* HWRM Forwarded Request (16 bytes) */
+struct hwrm_fwd_req_cmpl {
+	__le16 req_len_type;
+	#define HWRM_FWD_REQ_CMPL_TYPE_MASK			    0x3fUL
+	#define HWRM_FWD_REQ_CMPL_TYPE_SFT			    0
+	#define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ		   (0x22UL << 0)
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK			    0xffc0UL
+	#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT			    6
+	__le16 source_id;
+	__le32 unused_0;
+	__le32 req_buf_addr_v[2];
+	#define HWRM_FWD_REQ_CMPL_V				    0x1UL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK		    0xfffffffeUL
+	#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT		    1
+};
+
+/* HWRM Forwarded Response (16 bytes) */
+struct hwrm_fwd_resp_cmpl {
+	__le16 type;
+	#define HWRM_FWD_RESP_CMPL_TYPE_MASK			    0x3fUL
+	#define HWRM_FWD_RESP_CMPL_TYPE_SFT			    0
+	#define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP		   (0x24UL << 0)
+	__le16 source_id;
+	__le16 resp_len;
+	__le16 unused_1;
+	__le32 resp_buf_addr_v[2];
+	#define HWRM_FWD_RESP_CMPL_V				    0x1UL
+	#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK		    0xfffffffeUL
+	#define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT		    1
+};
+
+/* HWRM Asynchronous Event Completion Record (16 bytes) */
+struct hwrm_async_event_cmpl {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK		    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT			    0
+	#define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT       (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE    (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE  (0x2UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE  (0x3UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD   (0x10UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD     (0x11UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD     (0x20UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD       (0x20UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR		   (0x30UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR	   (0xffUL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_V			    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK		    0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT		    1
+	u8 unused_1[3];
+	__le32 event_data1;
+};
+
+/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
+struct hwrm_async_event_cmpl_link_status_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT  0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_UP 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT     0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK  0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT   1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK  0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT   0
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+};
+
+/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK  0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT   0
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V      0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK   0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT    0
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT  1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK     0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT      0
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK   0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK     0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT      0
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK   0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK       0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT	    0
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x20UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK     0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT      1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+struct hwrm_async_event_cmpl_vf_flr {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK		    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT		    0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR      (0x30UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V			    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK	    0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT	    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT  0
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+	__le32 event_data2;
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V	    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+	#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+struct hwrm_async_event_cmpl_hwrm_error {
+	__le16 type;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK	    0x3fUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT	    0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+	__le16 event_id;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+	__le32 event_data2;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+	u8 opaque_v;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V		    0x1UL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK       0xfeUL
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT	    1
+	u8 unused_1[3];
+	__le32 event_data1;
+	#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
+/* HW Resource Manager Specification 0.7.8 */
+#define HWRM_VERSION_MAJOR	0
+#define HWRM_VERSION_MINOR	7
+#define HWRM_VERSION_UPDATE	8
+
+#define HWRM_VERSION_STR	"0.7.8"
+/* The following is the signature for an HWRM message field that indicates
+ * it is not applicable (all F's). Cast it to the size of the field if
+ * needed.
+ */
+#define HWRM_NA_SIGNATURE	((__le32)(-1))
+#define HWRM_MAX_REQ_LEN    (128)  /* hwrm_func_buf_rgtr */
+#define HWRM_MAX_RESP_LEN    (176)  /* hwrm_func_qstats */
+#define HW_HASH_INDEX_SIZE      0x80    /* 7 bit indirection table index. */
+#define HW_HASH_KEY_SIZE	40
+#define HWRM_RESP_VALID_KEY      1 /* valid key for HWRM response */
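+/* Usage sketch (not part of the HSI definition): every request embeds
+ * struct input at its head, with resp_addr naming a DMA buffer for the
+ * response.  A driver fills req_type/seq_id/resp_addr, sends the message,
+ * then polls the response's trailing 'valid' byte for HWRM_RESP_VALID_KEY
+ * before trusting resp_len and the payload.
+ */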
+/* Input (16 bytes) */
+struct input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+};
+
+/* Command numbering (8 bytes) */
+struct cmd_nums {
+	__le16 req_type;
+	#define HWRM_VER_GET					   (0x0UL)
+	#define HWRM_FUNC_DISABLE				   (0x10UL)
+	#define HWRM_FUNC_RESET				   (0x11UL)
+	#define HWRM_FUNC_GETFID				   (0x12UL)
+	#define HWRM_FUNC_VF_ALLOC				   (0x13UL)
+	#define HWRM_FUNC_VF_FREE				   (0x14UL)
+	#define HWRM_FUNC_QCAPS				   (0x15UL)
+	#define HWRM_FUNC_QCFG					   (0x16UL)
+	#define HWRM_FUNC_CFG					   (0x17UL)
+	#define HWRM_FUNC_QSTATS				   (0x18UL)
+	#define HWRM_FUNC_CLR_STATS				   (0x19UL)
+	#define HWRM_FUNC_DRV_UNRGTR				   (0x1aUL)
+	#define HWRM_FUNC_VF_RESC_FREE				   (0x1bUL)
+	#define HWRM_FUNC_VF_VNIC_IDS_QUERY			   (0x1cUL)
+	#define HWRM_FUNC_DRV_RGTR				   (0x1dUL)
+	#define HWRM_FUNC_DRV_QVER				   (0x1eUL)
+	#define HWRM_FUNC_BUF_RGTR				   (0x1fUL)
+	#define HWRM_FUNC_VF_CFG				   (0x20UL)
+	#define HWRM_PORT_PHY_CFG				   (0x20UL)
+	#define HWRM_PORT_MAC_CFG				   (0x21UL)
+	#define HWRM_PORT_ENABLE				   (0x22UL)
+	#define HWRM_PORT_QSTATS				   (0x23UL)
+	#define HWRM_PORT_LPBK_QSTATS				   (0x24UL)
+	#define HWRM_PORT_CLR_STATS				   (0x25UL)
+	#define HWRM_PORT_LPBK_CLR_STATS			   (0x26UL)
+	#define HWRM_PORT_PHY_QCFG				   (0x27UL)
+	#define HWRM_PORT_MAC_QCFG				   (0x28UL)
+	#define HWRM_PORT_BLINK_LED				   (0x29UL)
+	#define HWRM_QUEUE_QPORTCFG				   (0x30UL)
+	#define HWRM_QUEUE_QCFG				   (0x31UL)
+	#define HWRM_QUEUE_CFG					   (0x32UL)
+	#define HWRM_QUEUE_BUFFERS_QCFG			   (0x33UL)
+	#define HWRM_QUEUE_BUFFERS_CFG				   (0x34UL)
+	#define HWRM_QUEUE_PFCENABLE_QCFG			   (0x35UL)
+	#define HWRM_QUEUE_PFCENABLE_CFG			   (0x36UL)
+	#define HWRM_QUEUE_PRI2COS_QCFG			   (0x37UL)
+	#define HWRM_QUEUE_PRI2COS_CFG				   (0x38UL)
+	#define HWRM_QUEUE_COS2BW_QCFG				   (0x39UL)
+	#define HWRM_QUEUE_COS2BW_CFG				   (0x3aUL)
+	#define HWRM_VNIC_ALLOC				   (0x40UL)
+	#define HWRM_VNIC_FREE					   (0x41UL)
+	#define HWRM_VNIC_CFG					   (0x42UL)
+	#define HWRM_VNIC_QCFG					   (0x43UL)
+	#define HWRM_VNIC_TPA_CFG				   (0x44UL)
+	#define HWRM_VNIC_TPA_QCFG				   (0x45UL)
+	#define HWRM_VNIC_RSS_CFG				   (0x46UL)
+	#define HWRM_VNIC_RSS_QCFG				   (0x47UL)
+	#define HWRM_VNIC_PLCMODES_CFG				   (0x48UL)
+	#define HWRM_VNIC_PLCMODES_QCFG			   (0x49UL)
+	#define HWRM_RING_ALLOC				   (0x50UL)
+	#define HWRM_RING_FREE					   (0x51UL)
+	#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS		   (0x52UL)
+	#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS		   (0x53UL)
+	#define HWRM_RING_RESET				   (0x5eUL)
+	#define HWRM_RING_GRP_ALLOC				   (0x60UL)
+	#define HWRM_RING_GRP_FREE				   (0x61UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC			   (0x70UL)
+	#define HWRM_VNIC_RSS_COS_LB_CTX_FREE			   (0x71UL)
+	#define HWRM_ARB_GRP_ALLOC				   (0x80UL)
+	#define HWRM_ARB_GRP_CFG				   (0x81UL)
+	#define HWRM_CFA_L2_FILTER_ALLOC			   (0x90UL)
+	#define HWRM_CFA_L2_FILTER_FREE			   (0x91UL)
+	#define HWRM_CFA_L2_FILTER_CFG				   (0x92UL)
+	#define HWRM_CFA_L2_SET_RX_MASK			   (0x93UL)
+	#define HWRM_CFA_L2_SET_BCASTMCAST_MIRRORING		   (0x94UL)
+	#define HWRM_CFA_TUNNEL_FILTER_ALLOC			   (0x95UL)
+	#define HWRM_CFA_TUNNEL_FILTER_FREE			   (0x96UL)
+	#define HWRM_CFA_ENCAP_RECORD_ALLOC			   (0x97UL)
+	#define HWRM_CFA_ENCAP_RECORD_FREE			   (0x98UL)
+	#define HWRM_CFA_NTUPLE_FILTER_ALLOC			   (0x99UL)
+	#define HWRM_CFA_NTUPLE_FILTER_FREE			   (0x9aUL)
+	#define HWRM_CFA_NTUPLE_FILTER_CFG			   (0x9bUL)
+	#define HWRM_TUNNEL_DST_PORT_QUERY			   (0xa0UL)
+	#define HWRM_TUNNEL_DST_PORT_ALLOC			   (0xa1UL)
+	#define HWRM_TUNNEL_DST_PORT_FREE			   (0xa2UL)
+	#define HWRM_STAT_CTX_ALLOC				   (0xb0UL)
+	#define HWRM_STAT_CTX_FREE				   (0xb1UL)
+	#define HWRM_STAT_CTX_QUERY				   (0xb2UL)
+	#define HWRM_STAT_CTX_CLR_STATS			   (0xb3UL)
+	#define HWRM_FW_RESET					   (0xc0UL)
+	#define HWRM_FW_QSTATUS				   (0xc1UL)
+	#define HWRM_EXEC_FWD_RESP				   (0xd0UL)
+	#define HWRM_REJECT_FWD_RESP				   (0xd1UL)
+	#define HWRM_FWD_RESP					   (0xd2UL)
+	#define HWRM_FWD_ASYNC_EVENT_CMPL			   (0xd3UL)
+	#define HWRM_TEMP_MONITOR_QUERY			   (0xe0UL)
+	#define HWRM_MGMT_L2_FILTER_ALLOC			   (0x100UL)
+	#define HWRM_MGMT_L2_FILTER_FREE			   (0x101UL)
+	#define HWRM_DBG_READ_DIRECT				   (0xff10UL)
+	#define HWRM_DBG_READ_INDIRECT				   (0xff11UL)
+	#define HWRM_DBG_WRITE_DIRECT				   (0xff12UL)
+	#define HWRM_DBG_WRITE_INDIRECT			   (0xff13UL)
+	#define HWRM_DBG_DUMP					   (0xff14UL)
+	#define HWRM_NVM_MODIFY				   (0xfff4UL)
+	#define HWRM_NVM_VERIFY_UPDATE				   (0xfff5UL)
+	#define HWRM_NVM_GET_DEV_INFO				   (0xfff6UL)
+	#define HWRM_NVM_ERASE_DIR_ENTRY			   (0xfff7UL)
+	#define HWRM_NVM_MOD_DIR_ENTRY				   (0xfff8UL)
+	#define HWRM_NVM_FIND_DIR_ENTRY			   (0xfff9UL)
+	#define HWRM_NVM_GET_DIR_ENTRIES			   (0xfffaUL)
+	#define HWRM_NVM_GET_DIR_INFO				   (0xfffbUL)
+	#define HWRM_NVM_RAW_DUMP				   (0xfffcUL)
+	#define HWRM_NVM_READ					   (0xfffdUL)
+	#define HWRM_NVM_WRITE					   (0xfffeUL)
+	#define HWRM_NVM_RAW_WRITE_BLK				   (0xffffUL)
+	__le16 unused_0[3];
+};
+
+/* Return Codes (8 bytes) */
+struct ret_codes {
+	__le16 error_code;
+	#define HWRM_ERR_CODE_SUCCESS				   (0x0UL)
+	#define HWRM_ERR_CODE_FAIL				   (0x1UL)
+	#define HWRM_ERR_CODE_INVALID_PARAMS			   (0x2UL)
+	#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED		   (0x3UL)
+	#define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR		   (0x4UL)
+	#define HWRM_ERR_CODE_INVALID_FLAGS			   (0x5UL)
+	#define HWRM_ERR_CODE_INVALID_ENABLES			   (0x6UL)
+	#define HWRM_ERR_CODE_HWRM_ERROR			   (0xfUL)
+	#define HWRM_ERR_CODE_UNKNOWN_ERR			   (0xfffeUL)
+	#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED		   (0xffffUL)
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_err_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 opaque_0;
+	__le16 opaque_1;
+	u8 opaque_2;
+	u8 valid;
+};
+
+/* Port Tx Statistics Formats (408 bytes) */
+struct tx_port_stats {
+	__le64 tx_64b_frames;
+	__le64 tx_65b_127b_frames;
+	__le64 tx_128b_255b_frames;
+	__le64 tx_256b_511b_frames;
+	__le64 tx_512b_1023b_frames;
+	__le64 tx_1024b_1518_frames;
+	__le64 tx_good_vlan_frames;
+	__le64 tx_1519b_2047_frames;
+	__le64 tx_2048b_4095b_frames;
+	__le64 tx_4096b_9216b_frames;
+	__le64 tx_9217b_16383b_frames;
+	__le64 tx_good_frames;
+	__le64 tx_total_frames;
+	__le64 tx_ucast_frames;
+	__le64 tx_mcast_frames;
+	__le64 tx_bcast_frames;
+	__le64 tx_pause_frames;
+	__le64 tx_pfc_frames;
+	__le64 tx_jabber_frames;
+	__le64 tx_fcs_err_frames;
+	__le64 tx_control_frames;
+	__le64 tx_oversz_frames;
+	__le64 tx_single_dfrl_frames;
+	__le64 tx_multi_dfrl_frames;
+	__le64 tx_single_coll_frames;
+	__le64 tx_multi_coll_frames;
+	__le64 tx_late_coll_frames;
+	__le64 tx_excessive_coll_frames;
+	__le64 tx_frag_frames;
+	__le64 tx_err;
+	__le64 tx_tagged_frames;
+	__le64 tx_dbl_tagged_frames;
+	__le64 tx_runt_frames;
+	__le64 tx_fifo_underruns;
+	__le64 tx_pfc_ena_frames_pri0;
+	__le64 tx_pfc_ena_frames_pri1;
+	__le64 tx_pfc_ena_frames_pri2;
+	__le64 tx_pfc_ena_frames_pri3;
+	__le64 tx_pfc_ena_frames_pri4;
+	__le64 tx_pfc_ena_frames_pri5;
+	__le64 tx_pfc_ena_frames_pri6;
+	__le64 tx_pfc_ena_frames_pri7;
+	__le64 tx_eee_lpi_events;
+	__le64 tx_eee_lpi_duration;
+	__le64 tx_llfc_logical_msgs;
+	__le64 tx_hcfc_msgs;
+	__le64 tx_total_collisions;
+	__le64 tx_bytes;
+	__le64 tx_xthol_frames;
+	__le64 tx_stat_discard;
+	__le64 tx_stat_error;
+};
+
+/* Port Rx Statistics Formats (528 bytes) */
+struct rx_port_stats {
+	__le64 rx_64b_frames;
+	__le64 rx_65b_127b_frames;
+	__le64 rx_128b_255b_frames;
+	__le64 rx_256b_511b_frames;
+	__le64 rx_512b_1023b_frames;
+	__le64 rx_1024b_1518_frames;
+	__le64 rx_good_vlan_frames;
+	__le64 rx_1519b_2047b_frames;
+	__le64 rx_2048b_4095b_frames;
+	__le64 rx_4096b_9216b_frames;
+	__le64 rx_9217b_16383b_frames;
+	__le64 rx_total_frames;
+	__le64 rx_ucast_frames;
+	__le64 rx_mcast_frames;
+	__le64 rx_bcast_frames;
+	__le64 rx_fcs_err_frames;
+	__le64 rx_ctrl_frames;
+	__le64 rx_pause_frames;
+	__le64 rx_pfc_frames;
+	__le64 rx_unsupported_opcode_frames;
+	__le64 rx_unsupported_da_pausepfc_frames;
+	__le64 rx_wrong_sa_frames;
+	__le64 rx_align_err_frames;
+	__le64 rx_oor_len_frames;
+	__le64 rx_code_err_frames;
+	__le64 rx_false_carrier_frames;
+	__le64 rx_ovrsz_frames;
+	__le64 rx_jbr_frames;
+	__le64 rx_mtu_err_frames;
+	__le64 rx_match_crc_frames;
+	__le64 rx_promiscuous_frames;
+	__le64 rx_tagged_frames;
+	__le64 rx_double_tagged_frames;
+	__le64 rx_trunc_frames;
+	__le64 rx_good_frames;
+	__le64 rx_pfc_xon2xoff_frames_pri0;
+	__le64 rx_pfc_xon2xoff_frames_pri1;
+	__le64 rx_pfc_xon2xoff_frames_pri2;
+	__le64 rx_pfc_xon2xoff_frames_pri3;
+	__le64 rx_pfc_xon2xoff_frames_pri4;
+	__le64 rx_pfc_xon2xoff_frames_pri5;
+	__le64 rx_pfc_xon2xoff_frames_pri6;
+	__le64 rx_pfc_xon2xoff_frames_pri7;
+	__le64 rx_pfc_ena_frames_pri0;
+	__le64 rx_pfc_ena_frames_pri1;
+	__le64 rx_pfc_ena_frames_pri2;
+	__le64 rx_pfc_ena_frames_pri3;
+	__le64 rx_pfc_ena_frames_pri4;
+	__le64 rx_pfc_ena_frames_pri5;
+	__le64 rx_pfc_ena_frames_pri6;
+	__le64 rx_pfc_ena_frames_pri7;
+	__le64 rx_sch_crc_err_frames;
+	__le64 rx_undrsz_frames;
+	__le64 rx_frag_frames;
+	__le64 rx_eee_lpi_events;
+	__le64 rx_eee_lpi_duration;
+	__le64 rx_llfc_physical_msgs;
+	__le64 rx_llfc_logical_msgs;
+	__le64 rx_llfc_msgs_with_crc_err;
+	__le64 rx_hcfc_msgs;
+	__le64 rx_hcfc_msgs_with_crc_err;
+	__le64 rx_bytes;
+	__le64 rx_runt_bytes;
+	__le64 rx_runt_frames;
+	__le64 rx_stat_discard;
+	__le64 rx_stat_err;
+};
+
+/* hwrm_ver_get */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 hwrm_intf_maj;
+	u8 hwrm_intf_min;
+	u8 hwrm_intf_upd;
+	u8 unused_0[5];
+};
+
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 hwrm_intf_maj;
+	u8 hwrm_intf_min;
+	u8 hwrm_intf_upd;
+	u8 hwrm_intf_rsvd;
+	u8 hwrm_fw_maj;
+	u8 hwrm_fw_min;
+	u8 hwrm_fw_bld;
+	u8 hwrm_fw_rsvd;
+	u8 ape_fw_maj;
+	u8 ape_fw_min;
+	u8 ape_fw_bld;
+	u8 ape_fw_rsvd;
+	u8 kong_fw_maj;
+	u8 kong_fw_min;
+	u8 kong_fw_bld;
+	u8 kong_fw_rsvd;
+	u8 tang_fw_maj;
+	u8 tang_fw_min;
+	u8 tang_fw_bld;
+	u8 tang_fw_rsvd;
+	u8 bono_fw_maj;
+	u8 bono_fw_min;
+	u8 bono_fw_bld;
+	u8 bono_fw_rsvd;
+	char hwrm_fw_name[16];
+	char ape_fw_name[16];
+	char kong_fw_name[16];
+	char tang_fw_name[16];
+	char bono_fw_name[16];
+	__le16 chip_num;
+	u8 chip_rev;
+	u8 chip_metal;
+	u8 chip_bond_id;
+	u8 unused_0;
+	__le16 max_req_win_len;
+	__le16 max_resp_len;
+	__le16 def_req_timeout;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_disable */
+/* Input (24 bytes) */
+struct hwrm_func_disable_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_DISABLE_REQ_ENABLES_VF_ID_VALID		    0x1UL
+	__le16 vf_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_disable_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_reset */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID		    0x1UL
+	__le16 vf_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_getfid */
+/* Input (24 bytes) */
+struct hwrm_func_getfid_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_GETFID_REQ_ENABLES_PCI_ID			    0x1UL
+	__le16 pci_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_getfid_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_func_vf_alloc */
+/* Input (24 bytes) */
+struct hwrm_func_vf_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID		    0x1UL
+	__le16 first_vf_id;
+	__le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 first_vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_func_vf_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID		    0x1UL
+	__le16 first_vf_id;
+	__le16 num_vfs;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_cfg */
+/* Input (24 bytes) */
+struct hwrm_func_vf_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_VF_CFG_REQ_ENABLES_MTU			    0x1UL
+	#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN		    0x2UL
+	__le16 mtu;
+	__le16 guest_vlan;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_qcaps */
+/* Input (24 bytes) */
+struct hwrm_func_qcaps_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_func_qcaps_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 fid;
+	__le16 port_id;
+	__le32 flags;
+	#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED	    0x1UL
+	#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING      0x2UL
+	u8 perm_mac_address[6];
+	__le16 max_rsscos_ctx;
+	__le16 max_cmpl_rings;
+	__le16 max_tx_rings;
+	__le16 max_rx_rings;
+	__le16 max_l2_ctxs;
+	__le16 max_vnics;
+	__le16 first_vf_id;
+	__le16 max_vfs;
+	__le16 max_stat_ctx;
+	__le32 max_encap_records;
+	__le32 max_decap_records;
+	__le32 max_tx_em_flows;
+	__le32 max_tx_wm_flows;
+	__le32 max_rx_em_flows;
+	__le32 max_rx_wm_flows;
+	__le32 max_mcast_filters;
+	__le32 max_flow_id;
+	__le32 max_hw_ring_grps;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_func_cfg */
+/* Input (88 bytes) */
+struct hwrm_func_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 flags;
+	#define FUNC_CFG_REQ_FLAGS_PROM_MODE			    0x1UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK		    0x2UL
+	#define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK		    0x4UL
+	#define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH		    0x8UL
+	#define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH		    0x10UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE		    0x20UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_STP			    0x40UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP		    0x80UL
+	#define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2		    0x100UL
+	__le32 enables;
+	#define FUNC_CFG_REQ_ENABLES_MTU			    0x1UL
+	#define FUNC_CFG_REQ_ENABLES_MRU			    0x2UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS		    0x4UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS		    0x8UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS		    0x10UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS		    0x20UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS		    0x40UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_VNICS			    0x80UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS		    0x100UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR		    0x200UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_VLAN			    0x400UL
+	#define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR		    0x800UL
+	#define FUNC_CFG_REQ_ENABLES_MIN_BW			    0x1000UL
+	#define FUNC_CFG_REQ_ENABLES_MAX_BW			    0x2000UL
+	#define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR		    0x4000UL
+	#define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE	    0x8000UL
+	#define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS		    0x10000UL
+	#define FUNC_CFG_REQ_ENABLES_EVB_MODE			    0x20000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS		    0x40000UL
+	#define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS		    0x80000UL
+	__le16 mtu;
+	__le16 mru;
+	__le16 num_rsscos_ctxs;
+	__le16 num_cmpl_rings;
+	__le16 num_tx_rings;
+	__le16 num_rx_rings;
+	__le16 num_l2_ctxs;
+	__le16 num_vnics;
+	__le16 num_stat_ctxs;
+	__le16 num_hw_ring_grps;
+	u8 dflt_mac_addr[6];
+	__le16 dflt_vlan;
+	__be32 dflt_ip_addr[4];
+	__le32 min_bw;
+	__le32 max_bw;
+	__le16 async_event_cr;
+	u8 vlan_antispoof_mode;
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK	   (0x0UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN    (0x1UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
+	#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+	u8 allowed_vlan_pris;
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_NOCHECK		   (0x0UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_VALIDATE_VLAN      (0x1UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_IF_VLANDNE  (0x2UL << 0)
+	#define FUNC_CFG_REQ_ALLOWED_VLAN_PRIS_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+	u8 evb_mode;
+	#define FUNC_CFG_REQ_EVB_MODE_NO_EVB			   (0x0UL << 0)
+	#define FUNC_CFG_REQ_EVB_MODE_VEB			   (0x1UL << 0)
+	#define FUNC_CFG_REQ_EVB_MODE_VEPA			   (0x2UL << 0)
+	u8 unused_2;
+	__le16 num_mcast_filters;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_qstats */
+/* Input (24 bytes) */
+struct hwrm_func_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (176 bytes) */
+struct hwrm_func_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_err_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_err_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 rx_agg_pkts;
+	__le64 rx_agg_bytes;
+	__le64 rx_agg_events;
+	__le64 rx_agg_aborts;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_func_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 fid;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_resc_free */
+/* Input (24 bytes) */
+struct hwrm_func_vf_resc_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_resc_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_vf_vnic_ids_query */
+/* Input (32 bytes) */
+struct hwrm_func_vf_vnic_ids_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 vf_id;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 max_vnic_id_cnt;
+	__le64 vnic_id_tbl_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vf_vnic_ids_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 vnic_id_cnt;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_func_drv_rgtr */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE		    0x1UL
+	#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE		    0x2UL
+	__le32 enables;
+	#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE		    0x1UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VER			    0x2UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP		    0x4UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD		    0x8UL
+	#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD	    0x10UL
+	__le16 os_type;
+	u8 ver_maj;
+	u8 ver_min;
+	u8 ver_upd;
+	u8 unused_0;
+	__le16 unused_1;
+	__le32 timestamp;
+	__le32 unused_2;
+	__le32 vf_req_fwd[8];
+	__le32 async_event_fwd[8];
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_rgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN     0x1UL
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_buf_rgtr */
+/* Input (128 bytes) */
+struct hwrm_func_buf_rgtr_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID		    0x1UL
+	#define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR		    0x2UL
+	__le16 vf_id;
+	__le16 req_buf_num_pages;
+	__le16 req_buf_page_size;
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B	   (0x4UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K		   (0xcUL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K		   (0xdUL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K	   (0x10UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M		   (0x16UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M		   (0x17UL << 0)
+	#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G		   (0x1eUL << 0)
+	__le16 req_buf_len;
+	__le16 resp_buf_len;
+	u8 unused_0;
+	u8 unused_1;
+	__le64 req_buf_page_addr0;
+	__le64 req_buf_page_addr1;
+	__le64 req_buf_page_addr2;
+	__le64 req_buf_page_addr3;
+	__le64 req_buf_page_addr4;
+	__le64 req_buf_page_addr5;
+	__le64 req_buf_page_addr6;
+	__le64 req_buf_page_addr7;
+	__le64 req_buf_page_addr8;
+	__le64 req_buf_page_addr9;
+	__le64 error_buf_addr;
+	__le64 resp_buf_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_buf_rgtr_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_func_drv_qver */
+/* Input (24 bytes) */
+struct hwrm_func_drv_qver_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define FUNC_DRV_QVER_REQ_ENABLES_OS_TYPE_VALID	    0x1UL
+	#define FUNC_DRV_QVER_REQ_ENABLES_VER_VALID		    0x2UL
+	__le16 fid;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_drv_qver_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 os_type;
+	u8 ver_maj;
+	u8 ver_min;
+	u8 ver_upd;
+	u8 unused_0;
+	u8 unused_1;
+	u8 valid;
+};
+
+/* hwrm_port_phy_cfg */
+/* Input (48 bytes) */
+struct hwrm_port_phy_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY		    0x1UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN		    0x2UL
+	#define PORT_PHY_CFG_REQ_FLAGS_FORCE			    0x4UL
+	#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG		    0x8UL
+	__le32 enables;
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE		    0x1UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX		    0x2UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE		    0x4UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED	    0x8UL
+	#define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK      0x10UL
+	#define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED		    0x20UL
+	#define PORT_PHY_CFG_REQ_ENABLES_LPBK			    0x40UL
+	#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS		    0x80UL
+	#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE		    0x100UL
+	__le16 port_id;
+	__le16 force_link_speed;
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	u8 auto_mode;
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE		   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_MODE_MASK		   (0x4UL << 0)
+	u8 auto_duplex;
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF		   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH		   (0x2UL << 0)
+	u8 auto_pause;
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX			    0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX			    0x2UL
+	u8 unused_0;
+	__le16 auto_link_speed;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB		   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB		   (0x19UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	__le16 auto_link_speed_mask;
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD      0x1UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB	    0x2UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD	    0x4UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB	    0x20UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB	    0x40UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB	    0x80UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB	    0x100UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB	    0x200UL
+	#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB	    0x400UL
+	u8 wirespeed;
+	#define PORT_PHY_CFG_REQ_WIRESPEED_OFF			   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_WIRESPEED_ON			   (0x1UL << 0)
+	u8 lpbk;
+	#define PORT_PHY_CFG_REQ_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_PHY_CFG_REQ_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_PHY_CFG_REQ_LPBK_REMOTE			   (0x2UL << 0)
+	u8 force_pause;
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX		    0x1UL
+	#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX		    0x2UL
+	u8 unused_1;
+	__le32 preemphasis;
+	__le32 unused_2;
+};
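+/* In the speed encodings above, the numeric value is the link speed in
+ * units of 100 Mbps: 0x1 = 100 Mbps, 0xa = 1 Gbps, 0x64 = 10 Gbps,
+ * 0x1f4 = 50 Gbps.  The same encoding is used by the qcfg response below.
+ */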
+
+/* Output (16 bytes) */
+struct hwrm_port_phy_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_phy_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_phy_qcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 link;
+	#define PORT_PHY_QCFG_RESP_LINK_NO_LINK		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SIGNAL			   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_LINK			   (0x2UL << 0)
+	u8 unused_0;
+	__le16 link_speed;
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB		   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB		   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB		   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB		   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB		   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB		   (0x1f4UL << 0)
+	u8 duplex;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_HALF			   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_DUPLEX_FULL			   (0x1UL << 0)
+	u8 pause;
+	#define PORT_PHY_QCFG_RESP_PAUSE_TX			    0x1UL
+	#define PORT_PHY_QCFG_RESP_PAUSE_RX			    0x2UL
+	__le16 support_speeds;
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD	    0x1UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB	    0x2UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD	    0x4UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB		    0x8UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB		    0x10UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB	    0x20UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB		    0x40UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB		    0x80UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB		    0x100UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB		    0x200UL
+	#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB		    0x400UL
+	__le16 force_link_speed;
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB	   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB	   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB	   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB	   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB	   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB	   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	u8 auto_mode;
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW	   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK		   (0x4UL << 0)
+	u8 auto_pause;
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX		    0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX		    0x2UL
+	__le16 auto_link_speed;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB	   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB		   (0xaUL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB		   (0x14UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB	   (0x19UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB	   (0x64UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB	   (0xc8UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB	   (0xfaUL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB	   (0x190UL << 0)
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB	   (0x1f4UL << 0)
+	__le16 auto_link_speed_mask;
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD    0x1UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB      0x2UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD      0x4UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB	    0x8UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB	    0x10UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB      0x20UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB       0x40UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB       0x80UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB       0x100UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB       0x200UL
+	#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB       0x400UL
+	u8 wirespeed;
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_WIRESPEED_ON		   (0x1UL << 0)
+	u8 lpbk;
+	#define PORT_PHY_QCFG_RESP_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LPBK_REMOTE			   (0x2UL << 0)
+	u8 force_pause;
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX		    0x1UL
+	#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX		    0x2UL
+	u8 duplex_setting;
+	#define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_HALF		   (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_DUPLEX_SETTING_FULL		   (0x1UL << 0)
+	__le32 preemphasis;
+	u8 phy_maj;
+	u8 phy_min;
+	u8 phy_bld;
+	u8 phy_type;
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4		   (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4		   (0x4UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2		   (0x5UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4		   (0x6UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR		   (0x7UL << 0)
+	#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET		   (0x8UL << 0)
+	u8 media_type;
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP		   (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC		   (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE		   (0x3UL << 0)
+	u8 transceiver_type;
+	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+	u8 phy_addr;
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK		    0x1fUL
+	#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT		    0
+	u8 unused_2;
+	__le16 link_partner_adv_speeds;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB   0x2UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD   0x4UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB     0x8UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB     0x10UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB   0x20UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB    0x40UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB    0x80UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB    0x100UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB    0x200UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB    0x400UL
+	u8 link_partner_adv_auto_mode;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+	u8 link_partner_adv_pause;
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX       0x1UL
+	#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX       0x2UL
+	u8 unused_3;
+	u8 unused_4;
+	u8 unused_5;
+	u8 valid;
+};
+
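+/*
+ * Editorial note on message layout (a sketch of the convention visible
+ * in the definitions here, not part of the firmware spec): every
+ * hwrm_*_input begins with the same 16-byte header (req_type,
+ * cmpl_ring, seq_id, target_id, resp_addr), and every hwrm_*_output
+ * begins with (error_code, req_type, seq_id, resp_len) and ends with a
+ * 'valid' byte that firmware writes last.  A caller can therefore DMA
+ * the response to resp_addr and poll 'valid' before trusting the rest
+ * of the buffer, e.g.:
+ *
+ *	struct hwrm_port_mac_cfg_output *resp = resp_virt;
+ *
+ *	while (tmo_loops-- && !resp->valid)
+ *		usleep_range(25, 40);
+ *	if (!resp->valid || resp->error_code)
+ *		return -EIO;
+ *
+ * resp_virt and tmo_loops are hypothetical locals used only for
+ * illustration.
+ */
+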
+/* hwrm_port_mac_cfg */
+/* Input (32 bytes) */
+struct hwrm_port_mac_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK		    0x1UL
+	#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE       0x2UL
+	#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE       0x4UL
+	#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE	    0x8UL
+	__le32 enables;
+	#define PORT_MAC_CFG_REQ_ENABLES_IPG			    0x1UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LPBK			    0x2UL
+	#define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI     0x4UL
+	#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI		    0x8UL
+	#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI    0x10UL
+	#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI	    0x20UL
+	__le16 port_id;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_REQ_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_MAC_CFG_REQ_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_MAC_CFG_REQ_LPBK_REMOTE			   (0x2UL << 0)
+	u8 ivlan_pri2cos_map_pri;
+	u8 lcos_map_pri;
+	u8 tunnel_pri2cos_map_pri;
+	u8 dscp2pri_map_pri;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_mac_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 mru;
+	__le16 mtu;
+	u8 ipg;
+	u8 lpbk;
+	#define PORT_MAC_CFG_RESP_LPBK_NONE			   (0x0UL << 0)
+	#define PORT_MAC_CFG_RESP_LPBK_LOCAL			   (0x1UL << 0)
+	#define PORT_MAC_CFG_RESP_LPBK_REMOTE			   (0x2UL << 0)
+	u8 unused_0;
+	u8 valid;
+};
+
+/* hwrm_port_enable */
+/* Input (24 bytes) */
+struct hwrm_port_enable_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define PORT_ENABLE_REQ_FLAGS_FORWARD_TRAFFIC		    0x1UL
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_enable_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_qstats */
+/* Input (40 bytes) */
+struct hwrm_port_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2[3];
+	u8 unused_3;
+	__le64 tx_stat_host_addr;
+	__le64 rx_stat_host_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
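+/*
+ * Illustrative use of hwrm_port_qstats (a sketch; the buffer names are
+ * hypothetical): the caller supplies two host DMA addresses into which
+ * firmware writes the TX and RX port statistics blocks:
+ *
+ *	struct hwrm_port_qstats_input req = {0};
+ *
+ *	req.port_id = cpu_to_le16(port_id);
+ *	req.tx_stat_host_addr = cpu_to_le64(tx_stats_dma);
+ *	req.rx_stat_host_addr = cpu_to_le64(rx_stats_dma);
+ *
+ * where tx_stats_dma/rx_stats_dma would come from dma_alloc_coherent().
+ */
+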
+/* hwrm_port_lpbk_qstats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_qstats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (64 bytes) */
+struct hwrm_port_lpbk_qstats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 lpbk_ucast_frames;
+	__le64 lpbk_mcast_frames;
+	__le64 lpbk_bcast_frames;
+	__le64 lpbk_ucast_bytes;
+	__le64 lpbk_mcast_bytes;
+	__le64 lpbk_bcast_bytes;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_port_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 port_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats */
+/* Input (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_lpbk_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_port_blink_led */
+/* Input (24 bytes) */
+struct hwrm_port_blink_led_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 num_blinks;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_blink_led_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_qportcfg */
+/* Input (24 bytes) */
+struct hwrm_queue_qportcfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH			    0x1UL
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 max_configurable_queues;
+	u8 max_configurable_lossless_queues;
+	u8 queue_cfg_allowed;
+	u8 queue_buffers_cfg_allowed;
+	u8 queue_pfcenable_cfg_allowed;
+	u8 queue_pri2cos_cfg_allowed;
+	u8 queue_cos2bw_cfg_allowed;
+	u8 queue_id0;
+	u8 queue_id0_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id1;
+	u8 queue_id1_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id2;
+	u8 queue_id2_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id3;
+	u8 queue_id3_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id4;
+	u8 queue_id4_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id5;
+	u8 queue_id5_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id6;
+	u8 queue_id6_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 queue_id7;
+	u8 queue_id7_service_profile;
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
+	#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+	u8 valid;
+};
+
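+/*
+ * The qportcfg response lays its eight (queue_idN,
+ * queue_idN_service_profile) byte pairs out back to back, so a caller
+ * can walk them as an array instead of naming each field (illustrative
+ * sketch only):
+ *
+ *	u8 *qptr = &resp->queue_id0;
+ *	int i;
+ *
+ *	for (i = 0; i < resp->max_configurable_queues; i++) {
+ *		u8 id = *qptr++;
+ *		u8 profile = *qptr++;	(LOSSY, LOSSLESS or UNKNOWN)
+ *	}
+ */
+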
+/* hwrm_queue_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_CFG_REQ_FLAGS_PATH			    0x1UL
+	#define QUEUE_CFG_REQ_FLAGS_PATH_TX			   (0x0UL << 0)
+	#define QUEUE_CFG_REQ_FLAGS_PATH_RX			   (0x1UL << 0)
+	__le32 enables;
+	#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN			    0x1UL
+	#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE		    0x2UL
+	__le32 queue_id;
+	__le32 dflt_len;
+	u8 service_profile;
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY		   (0x0UL << 0)
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS		   (0x1UL << 0)
+	#define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN		   (0xffUL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_buffers_cfg */
+/* Input (56 bytes) */
+struct hwrm_queue_buffers_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le32 enables;
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED		    0x1UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED		    0x2UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_GROUP		    0x4UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF		    0x8UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON		    0x10UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL		    0x20UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL		    0x40UL
+	#define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX		    0x80UL
+	__le32 queue_id;
+	__le32 reserved;
+	__le32 shared;
+	__le32 xoff;
+	__le32 xon;
+	__le32 full;
+	__le32 notfull;
+	__le32 max;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_buffers_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg */
+/* Input (24 bytes) */
+struct hwrm_queue_pfcenable_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI0_PFC_ENABLED   0x1UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI1_PFC_ENABLED   0x2UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI2_PFC_ENABLED   0x4UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI3_PFC_ENABLED   0x8UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI4_PFC_ENABLED   0x10UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI5_PFC_ENABLED   0x20UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI6_PFC_ENABLED   0x40UL
+	#define QUEUE_PFCENABLE_CFG_REQ_ENABLES_PRI7_PFC_ENABLED   0x80UL
+	__le16 port_id;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pfcenable_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_pri2cos_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN		    0x2UL
+	__le32 enables;
+	u8 port_id;
+	u8 pri0_cos;
+	u8 pri1_cos;
+	u8 pri2_cos;
+	u8 pri3_cos;
+	u8 pri4_cos;
+	u8 pri5_cos;
+	u8 pri6_cos;
+	u8 pri7_cos;
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_pri2cos_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_queue_cos2bw_cfg */
+/* Input (128 bytes) */
+struct hwrm_queue_cos2bw_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	__le32 enables;
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID   0x1UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID   0x2UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID   0x4UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID   0x8UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID   0x10UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID   0x20UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID   0x40UL
+	#define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID   0x80UL
+	__le16 port_id;
+	u8 queue_id0;
+	u8 unused_0;
+	__le32 queue_id0_min_bw;
+	__le32 queue_id0_max_bw;
+	u8 queue_id0_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id0_pri_lvl;
+	u8 queue_id0_bw_weight;
+	u8 queue_id1;
+	__le32 queue_id1_min_bw;
+	__le32 queue_id1_max_bw;
+	u8 queue_id1_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id1_pri_lvl;
+	u8 queue_id1_bw_weight;
+	u8 queue_id2;
+	__le32 queue_id2_min_bw;
+	__le32 queue_id2_max_bw;
+	u8 queue_id2_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id2_pri_lvl;
+	u8 queue_id2_bw_weight;
+	u8 queue_id3;
+	__le32 queue_id3_min_bw;
+	__le32 queue_id3_max_bw;
+	u8 queue_id3_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id3_pri_lvl;
+	u8 queue_id3_bw_weight;
+	u8 queue_id4;
+	__le32 queue_id4_min_bw;
+	__le32 queue_id4_max_bw;
+	u8 queue_id4_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id4_pri_lvl;
+	u8 queue_id4_bw_weight;
+	u8 queue_id5;
+	__le32 queue_id5_min_bw;
+	__le32 queue_id5_max_bw;
+	u8 queue_id5_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id5_pri_lvl;
+	u8 queue_id5_bw_weight;
+	u8 queue_id6;
+	__le32 queue_id6_min_bw;
+	__le32 queue_id6_max_bw;
+	u8 queue_id6_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id6_pri_lvl;
+	u8 queue_id6_bw_weight;
+	u8 queue_id7;
+	__le32 queue_id7_min_bw;
+	__le32 queue_id7_max_bw;
+	u8 queue_id7_tsa_assign;
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP      (0x0UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS     (0x1UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
+	#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+	u8 queue_id7_pri_lvl;
+	u8 queue_id7_bw_weight;
+	u8 unused_1[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_cos2bw_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
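+/*
+ * Illustrative cos2bw use (a sketch; values are hypothetical): a CoS
+ * queue record is only consulted when its bit is set in 'enables'.
+ * Describing queue 0 as an ETS queue with a 25% relative weight:
+ *
+ *	struct hwrm_queue_cos2bw_cfg_input req = {0};
+ *
+ *	req.enables =
+ *		cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID);
+ *	req.port_id = cpu_to_le16(port_id);
+ *	req.queue_id0 = cos_queue_id;
+ *	req.queue_id0_tsa_assign =
+ *		QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS;
+ *	req.queue_id0_bw_weight = 25;
+ */
+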
+/* hwrm_vnic_alloc */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_ALLOC_REQ_FLAGS_DEFAULT			    0x1UL
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 vnic_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_vnic_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 vnic_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_CFG_REQ_FLAGS_DEFAULT			    0x1UL
+	#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE		    0x2UL
+	__le32 enables;
+	#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP		    0x1UL
+	#define VNIC_CFG_REQ_ENABLES_RSS_RULE			    0x2UL
+	#define VNIC_CFG_REQ_ENABLES_COS_RULE			    0x4UL
+	#define VNIC_CFG_REQ_ENABLES_LB_RULE			    0x8UL
+	#define VNIC_CFG_REQ_ENABLES_MRU			    0x10UL
+	__le16 vnic_id;
+	__le16 dflt_ring_grp;
+	__le16 rss_rule;
+	__le16 cos_rule;
+	__le16 lb_rule;
+	__le16 mru;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
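+/*
+ * vnic_cfg sketch (illustrative): as with the other _cfg commands, a
+ * field is only honoured when its bit is set in 'enables'.  Setting
+ * just the MRU, for example:
+ *
+ *	struct hwrm_vnic_cfg_input req = {0};
+ *
+ *	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_MRU);
+ *	req.vnic_id = cpu_to_le16(vnic_id);
+ *	req.mru = cpu_to_le16(mtu + ETH_HLEN + VLAN_HLEN);
+ *
+ * The MRU arithmetic above is an assumption made for illustration, not
+ * a requirement of the interface.
+ */
+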
+/* hwrm_vnic_tpa_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_TPA_CFG_REQ_FLAGS_TPA			    0x1UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA		    0x2UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE		    0x4UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO			    0x8UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN		    0x10UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ       0x20UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK		    0x40UL
+	#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK		    0x80UL
+	__le32 enables;
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS		    0x1UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS		    0x2UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER		    0x4UL
+	#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN		    0x8UL
+	__le16 vnic_id;
+	__le16 max_agg_segs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1		   (0x0UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2		   (0x1UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4		   (0x2UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8		   (0x3UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX		   (0x1fUL << 0)
+	__le16 max_aggs;
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_1			   (0x0UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_2			   (0x1UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_4			   (0x2UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_8			   (0x3UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_16			   (0x4UL << 0)
+	#define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX			   (0x7UL << 0)
+	u8 unused_0;
+	u8 unused_1;
+	__le32 max_agg_timer;
+	__le32 min_agg_len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 hash_type;
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4		    0x1UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4		    0x2UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4		    0x4UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6		    0x8UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6		    0x10UL
+	#define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6		    0x20UL
+	__le32 unused_0;
+	__le64 ring_grp_tbl_addr;
+	__le64 hash_key_tbl_addr;
+	__le16 rss_ctx_idx;
+	__le16 unused_1[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
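+/*
+ * vnic_rss_cfg sketch (illustrative; the DMA handles are hypothetical):
+ * the RSS indirection table and the hash key live in host memory and
+ * are passed by DMA address:
+ *
+ *	struct hwrm_vnic_rss_cfg_input req = {0};
+ *
+ *	req.hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ *				    VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ *				    VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ *				    VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6);
+ *	req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_dma);
+ *	req.hash_key_tbl_addr = cpu_to_le64(hash_key_dma);
+ *	req.rss_ctx_idx = cpu_to_le16(rss_ctx_id);
+ */
+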
+/* hwrm_vnic_plcmodes_cfg */
+/* Input (40 bytes) */
+struct hwrm_vnic_plcmodes_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT      0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT	    0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4		    0x4UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6		    0x8UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE		    0x10UL
+	#define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE		    0x20UL
+	__le32 enables;
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID   0x1UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID     0x2UL
+	#define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID  0x4UL
+	__le32 vnic_id;
+	__le16 jumbo_thresh;
+	__le16 hds_offset;
+	__le16 hds_threshold;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_plcmodes_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 rss_cos_lb_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 rss_cos_lb_ctx_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_alloc */
+/* Input (80 bytes) */
+struct hwrm_ring_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define RING_ALLOC_REQ_ENABLES_ARB_GRP_ID_VALID	    0x1UL
+	#define RING_ALLOC_REQ_ENABLES_INPUT_NUM_VALID		    0x2UL
+	#define RING_ALLOC_REQ_ENABLES_WEIGHT_VALID		    0x4UL
+	#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID	    0x8UL
+	#define RING_ALLOC_REQ_ENABLES_MIN_BW_VALID		    0x10UL
+	#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID		    0x20UL
+	u8 ring_type;
+	#define RING_ALLOC_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_STATUS		   (0x3UL << 0)
+	#define RING_ALLOC_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	__le64 page_tbl_addr;
+	__le32 fbo;
+	u8 page_size;
+	u8 page_tbl_depth;
+	u8 unused_2;
+	u8 unused_3;
+	__le32 length;
+	__le16 logical_id;
+	__le16 cmpl_ring_id;
+	__le16 queue_id;
+	u8 unused_4;
+	u8 unused_5;
+	__le32 arb_grp_id;
+	__le16 input_number;
+	u8 unused_6;
+	u8 unused_7;
+	__le32 weight;
+	__le32 stat_ctx_id;
+	__le32 min_bw;
+	__le32 max_bw;
+	u8 int_mode;
+	#define RING_ALLOC_REQ_INT_MODE_LEGACY			   (0x0UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_MSI			   (0x1UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_MSIX			   (0x2UL << 0)
+	#define RING_ALLOC_REQ_INT_MODE_POLL			   (0x3UL << 0)
+	u8 unused_8[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 ring_id;
+	__le16 logical_ring_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
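+/*
+ * ring_alloc sketch (illustrative; the exact field semantics are
+ * assumptions): allocate an RX ring whose descriptor pages are
+ * described by a page table at page_tbl_addr and whose completions go
+ * to an already-allocated completion ring:
+ *
+ *	struct hwrm_ring_alloc_input req = {0};
+ *
+ *	req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ *	req.page_tbl_addr = cpu_to_le64(ring_pg_tbl_dma);
+ *	req.length = cpu_to_le32(ring_size);
+ *	req.logical_id = cpu_to_le16(ring_index);
+ *	req.cmpl_ring_id = cpu_to_le16(cmpl_ring_id);
+ *
+ * The firmware-assigned handle comes back in
+ * hwrm_ring_alloc_output.ring_id.
+ */
+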
+/* hwrm_ring_free */
+/* Input (24 bytes) */
+struct hwrm_ring_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 ring_type;
+	#define RING_FREE_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_STATUS			   (0x3UL << 0)
+	#define RING_FREE_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 ring_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params */
+/* Input (24 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 ring_id;
+	__le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 flags;
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+	#define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+	__le16 num_cmpl_dma_aggr;
+	__le16 num_cmpl_dma_aggr_during_int;
+	__le16 cmpl_aggr_dma_tmr;
+	__le16 cmpl_aggr_dma_tmr_during_int;
+	__le16 int_lat_tmr_min;
+	__le16 int_lat_tmr_max;
+	__le16 num_cmpl_aggr_int;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+/* Input (40 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 ring_id;
+	__le16 flags;
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+	#define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+	__le16 num_cmpl_dma_aggr;
+	__le16 num_cmpl_dma_aggr_during_int;
+	__le16 cmpl_aggr_dma_tmr;
+	__le16 cmpl_aggr_dma_tmr_during_int;
+	__le16 int_lat_tmr_min;
+	__le16 int_lat_tmr_max;
+	__le16 num_cmpl_aggr_int;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_reset */
+/* Input (24 bytes) */
+struct hwrm_ring_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 ring_type;
+	#define RING_RESET_REQ_RING_TYPE_CMPL			   (0x0UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_TX			   (0x1UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_RX			   (0x2UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_STATUS		   (0x3UL << 0)
+	#define RING_RESET_REQ_RING_TYPE_CMD			   (0x4UL << 0)
+	u8 unused_0;
+	__le16 ring_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_ring_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 cr;
+	__le16 rr;
+	__le16 ar;
+	__le16 sc;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 ring_group_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_ring_grp_free */
+/* Input (24 bytes) */
+struct hwrm_ring_grp_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 ring_group_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_ring_grp_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_arb_grp_alloc */
+/* Input (24 bytes) */
+struct hwrm_arb_grp_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 input_number;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 arb_grp_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_arb_grp_cfg */
+/* Input (32 bytes) */
+struct hwrm_arb_grp_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 arb_grp_id;
+	__le16 input_number;
+	__le16 tx_ring;
+	__le32 weight;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_arb_grp_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_alloc */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK		    0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP		    0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST	    0x8UL
+	__le32 enables;
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x1UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK       0x2UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN	    0x4UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK      0x8UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN	    0x10UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK      0x20UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR	    0x40UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK     0x80UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN	    0x100UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK    0x200UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN	    0x400UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK    0x800UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE	    0x1000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID		    0x2000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE	    0x4000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID	    0x8000UL
+	#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID     0x10000UL
+	u8 l2_addr[6];
+	u8 unused_0;
+	u8 unused_1;
+	u8 l2_addr_mask[6];
+	__le16 l2_ovlan;
+	__le16 l2_ovlan_mask;
+	__le16 l2_ivlan;
+	__le16 l2_ivlan_mask;
+	u8 unused_2;
+	u8 unused_3;
+	u8 t_l2_addr[6];
+	u8 unused_4;
+	u8 unused_5;
+	u8 t_l2_addr_mask[6];
+	__le16 t_l2_ovlan;
+	__le16 t_l2_ovlan_mask;
+	__le16 t_l2_ivlan;
+	__le16 t_l2_ivlan_mask;
+	u8 src_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT		   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF		   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF		   (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC		   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG		   (0x4UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE		   (0x5UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO		   (0x6UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG		   (0x7UL << 0)
+	u8 unused_6;
+	__le32 src_id;
+	u8 tunnel_type;
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL     (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN	   (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE	   (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE	   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE	   (0x5UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE	   (0x8UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL     (0xffUL << 0)
+	u8 unused_7;
+	__le16 dst_vnic_id;
+	__le16 mirror_vnic_id;
+	u8 pri_hint;
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER	   (0x0UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER     (0x1UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER     (0x2UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX		   (0x3UL << 0)
+	#define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN		   (0x4UL << 0)
+	u8 unused_8;
+	__le32 unused_9;
+	__le64 l2_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 l2_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
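+/*
+ * cfa_l2_filter_alloc sketch (illustrative): install an RX filter that
+ * steers frames for a unicast MAC to a given VNIC.  Only fields whose
+ * 'enables' bits are set are consulted:
+ *
+ *	struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ *
+ *	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ *	req.enables =
+ *		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ *			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
+ *			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID);
+ *	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ *	memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+ *	req.dst_vnic_id = cpu_to_le16(vnic_id);
+ *
+ * The returned l2_filter_id is the handle later passed to
+ * hwrm_cfa_l2_filter_free/cfg.
+ */
+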
+/* hwrm_cfa_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_l2_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 l2_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_filter_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH		    0x1UL
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP		    0x2UL
+	__le32 enables;
+	#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_VNIC_ID_VALID    0x1UL
+	__le64 l2_filter_id;
+	__le32 dst_vnic_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_filter_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask */
+/* Input (40 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 dflt_vnic_id;
+	__le32 mask;
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST		    0x1UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST		    0x2UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST		    0x4UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST		    0x8UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS	    0x10UL
+	#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST		    0x20UL
+	__le64 mc_tbl_addr;
+	__le32 num_mc_entries;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
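+/*
+ * cfa_l2_set_rx_mask sketch (illustrative): the mask bits mirror the
+ * usual net_device rx-mode flags, and a multicast list is passed as a
+ * DMA'd table of MAC addresses:
+ *
+ *	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ *	u32 mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
+ *		   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+ *
+ *	if (promisc)
+ *		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+ *	req.dflt_vnic_id = cpu_to_le32(vnic_id);
+ *	req.mask = cpu_to_le32(mask);
+ *	req.mc_tbl_addr = cpu_to_le64(mc_list_dma);
+ *	req.num_mc_entries = cpu_to_le32(mc_count);
+ */
+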
+/* hwrm_cfa_l2_set_bcastmcast_mirroring */
+/* Input (32 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 dflt_vnic_id;
+	__le32 mirroring_flags;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_MIRRORING 0x1UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_MIRRORING 0x2UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_BCAST_SRC_KNOCKOUT 0x4UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_MCAST_SRC_KNOCKOUT 0x8UL
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MIRRORING_FLAGS_VLAN_ID_VALID 0x10UL
+	__le16 vlan_id;
+	u8 bcast_domain;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_PFONLY (0x0UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFS (0x1UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_BCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+	u8 mcast_domain;
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_PFONLY (0x0UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFS (0x1UL << 0)
+	#define CFA_L2_SET_BCASTMCAST_MIRRORING_REQ_MCAST_DOMAIN_ALLPFSVFS (0x2UL << 0)
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_l2_set_bcastmcast_mirroring_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc */
+/* Input (88 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	__le32 enables;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR	    0x2UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN       0x4UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR	    0x8UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE   0x10UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR      0x40UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x80UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI	    0x100UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x200UL
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+	__le64 l2_filter_id;
+	u8 l2_addr[6];
+	__le16 l2_ivlan;
+	__le32 l3_addr[4];
+	__le32 t_l3_addr[4];
+	u8 l3_addr_type;
+	u8 t_l3_addr_type;
+	u8 tunnel_type;
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+	#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+	u8 unused_0;
+	__le32 vni;
+	__le32 dst_vnic_id;
+	__le32 mirror_vnic_id;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tunnel_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_tunnel_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 tunnel_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_tunnel_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_encap_record_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_encap_record_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	u8 encap_type;
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN       (0x1UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE       (0x2UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE       (0x3UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP	   (0x4UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE      (0x5UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS	   (0x6UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN	   (0x7UL << 0)
+	#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE       (0x8UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	__le32 encap_data[16];
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_encap_record_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 encap_record_id;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_encap_record_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 encap_record_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_encap_record_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK	    0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP		    0x2UL
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE    0x4UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR    0x8UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE    0x10UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR     0x20UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR     0x80UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL    0x200UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT       0x400UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK  0x800UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT       0x1000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK  0x2000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT       0x4000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID    0x10000UL
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+	__le64 l2_filter_id;
+	u8 src_macaddr[6];
+	__be16 ethertype;
+	u8 ipaddr_type;
+	u8 ip_protocol;
+	__le16 dst_vnic_id;
+	__le16 mirror_vnic_id;
+	u8 tunnel_type;
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN     (0x1UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE     (0x2UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE     (0x3UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP      (0x4UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE    (0x5UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS      (0x6UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT       (0x7UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE     (0x8UL << 0)
+	#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+	u8 pri_hint;
+	__be32 src_ipaddr[4];
+	__be32 src_ipaddr_mask[4];
+	__be32 dst_ipaddr[4];
+	__be32 dst_ipaddr_mask[4];
+	__be16 src_port;
+	__be16 src_port_mask;
+	__be16 dst_port;
+	__be16 dst_port_mask;
+	__le64 ntuple_filter_id_hint;
+};
+
+/* Output (24 bytes) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 ntuple_filter_id;
+	__le32 flow_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_ntuple_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 ntuple_filter_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg */
+/* Input (40 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_VNIC_ID_VALID 0x1UL
+	#define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID_VALID 0x2UL
+	__le32 unused_0;
+	__le64 ntuple_filter_id;
+	__le32 new_dst_vnic_id;
+	__le32 new_mirror_vnic_id;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+	#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	__be16 tunnel_dst_port_val;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL   (0x0UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN       (0x1UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_NVGRE       (0x2UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2GRE       (0x3UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE      (0x5UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE       (0x8UL << 0)
+	#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL   (0xffUL << 0)
+	u8 unused_0;
+	__be16 tunnel_dst_port_val;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 tunnel_dst_port_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
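+/*
+ * tunnel_dst_port_alloc sketch (illustrative): note that
+ * tunnel_dst_port_val is __be16 (network byte order) while everything
+ * else in the message is little-endian, so registering the IANA VXLAN
+ * port looks like:
+ *
+ *	struct hwrm_tunnel_dst_port_alloc_input req = {0};
+ *
+ *	req.tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *	req.tunnel_dst_port_val = htons(4789);
+ *
+ * The response returns a tunnel_dst_port_id handle suitable for
+ * hwrm_tunnel_dst_port_free.
+ */
+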
+/* hwrm_tunnel_dst_port_free */
+/* Input (24 bytes) */
+struct hwrm_tunnel_dst_port_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 tunnel_type;
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NONTUNNEL    (0x0UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN	   (0x1UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_NVGRE	   (0x2UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2GRE	   (0x3UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPIP	   (0x4UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE       (0x5UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_MPLS	   (0x6UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_STT	   (0x7UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE	   (0x8UL << 0)
+	#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL    (0xffUL << 0)
+	u8 unused_0;
+	__le16 tunnel_dst_port_id;
+	__le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_tunnel_dst_port_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_alloc */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 stats_dma_addr;
+	__le32 update_period_ms;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
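+/*
+ * stat_ctx_alloc sketch (illustrative): the request hands firmware a
+ * host buffer and a refresh period, suggesting firmware periodically
+ * DMAs a counter block to stats_dma_addr every update_period_ms:
+ *
+ *	struct hwrm_stat_ctx_alloc_input req = {0};
+ *
+ *	req.stats_dma_addr = cpu_to_le64(stats_dma);
+ *	req.update_period_ms = cpu_to_le32(1000);
+ *
+ * That the DMA'd block matches the counter layout of
+ * hwrm_stat_ctx_query_output below is an assumption made for
+ * illustration.
+ */
+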
+/* hwrm_stat_ctx_free */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 stat_ctx_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_query */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_stat_ctx_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le64 tx_ucast_pkts;
+	__le64 tx_mcast_pkts;
+	__le64 tx_bcast_pkts;
+	__le64 tx_err_pkts;
+	__le64 tx_drop_pkts;
+	__le64 tx_ucast_bytes;
+	__le64 tx_mcast_bytes;
+	__le64 tx_bcast_bytes;
+	__le64 rx_ucast_pkts;
+	__le64 rx_mcast_pkts;
+	__le64 rx_bcast_pkts;
+	__le64 rx_err_pkts;
+	__le64 rx_drop_pkts;
+	__le64 rx_ucast_bytes;
+	__le64 rx_mcast_bytes;
+	__le64 rx_bcast_bytes;
+	__le64 rx_agg_pkts;
+	__le64 rx_agg_bytes;
+	__le64 rx_agg_events;
+	__le64 rx_agg_aborts;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 stat_ctx_id;
+	__le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_clr_stats_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_alloc */
+/* Input (56 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 flags;
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH		    0x1UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX		   (0x0UL << 0)
+	#define MGMT_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX		   (0x1UL << 0)
+	__le32 enables;
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDRESS	    0x1UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_OVLAN		    0x2UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_IVLAN		    0x4UL
+	#define MGMT_L2_FILTER_ALLOC_REQ_ENABLES_ACTION_ID	    0x8UL
+	u8 l2_address[6];
+	u8 unused_0;
+	u8 unused_1;
+	u8 l2_address_mask[6];
+	__le16 ovlan;
+	__le16 ovlan_mask;
+	__le16 ivlan;
+	__le16 ivlan_mask;
+	u8 unused_2;
+	u8 unused_3;
+	__le32 action_id;
+	u8 action_bypass;
+	#define MGMT_L2_FILTER_ALLOC_REQ_ACTION_BYPASS		    0x1UL
+	u8 unused_5[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_alloc_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 mgmt_l2_filter_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_mgmt_l2_filter_free */
+/* Input (24 bytes) */
+struct hwrm_mgmt_l2_filter_free_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 mgmt_l2_filter_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_mgmt_l2_filter_free_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_raw_write_blk */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_write_blk_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le32 dest_addr;
+	__le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_write_blk_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le16 dir_idx;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 offset;
+	__le32 len;
+	__le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_raw_dump */
+/* Input (32 bytes) */
+struct hwrm_nvm_raw_dump_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+	__le32 offset;
+	__le32 len;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_raw_dump_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries */
+/* Input (24 bytes) */
+struct hwrm_nvm_get_dir_entries_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_dest_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_get_dir_entries_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dir_info_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_get_dir_info_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 entries;
+	__le32 entry_length;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_write */
+/* Input (40 bytes) */
+struct hwrm_nvm_write_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 dir_attr;
+	__le32 dir_data_length;
+	__le16 option;
+	__le16 flags;
+	#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG	    0x1UL
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_write_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_modify */
+/* Input (40 bytes) */
+struct hwrm_nvm_modify_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le64 host_src_addr;
+	__le16 dir_idx;
+	u8 unused_0;
+	u8 unused_1;
+	__le32 offset;
+	__le32 len;
+	__le32 unused_2;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_modify_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_find_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID       0x1UL
+	__le16 dir_idx;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	u8 opt_ordinal;
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK	    0x3UL
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT		    0
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ		   (0x0UL << 0)
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE		   (0x1UL << 0)
+	#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT		   (0x2UL << 0)
+	u8 unused_1[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_find_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 dir_item_length;
+	__le32 dir_data_length;
+	__le32 fw_ver;
+	__le16 dir_ordinal;
+	__le16 dir_idx;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry */
+/* Input (24 bytes) */
+struct hwrm_nvm_erase_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 dir_idx;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_erase_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info */
+/* Input (16 bytes) */
+struct hwrm_nvm_get_dev_info_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (32 bytes) */
+struct hwrm_nvm_get_dev_info_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le16 manufacturer_id;
+	__le16 device_id;
+	__le32 sector_size;
+	__le32 nvram_size;
+	__le32 reserved_size;
+	__le32 available_size;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry */
+/* Input (32 bytes) */
+struct hwrm_nvm_mod_dir_entry_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 enables;
+	#define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM		    0x1UL
+	__le16 dir_idx;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 dir_attr;
+	__le32 checksum;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_mod_dir_entry_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_nvm_verify_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_verify_update_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 dir_type;
+	__le16 dir_ordinal;
+	__le16 dir_ext;
+	__le16 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_nvm_verify_update_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_exec_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_exec_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[24];
+	__le16 encap_resp_target_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_exec_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_reject_fwd_resp */
+/* Input (120 bytes) */
+struct hwrm_reject_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le32 encap_request[24];
+	__le16 encap_resp_target_id;
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_reject_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fwd_resp */
+/* Input (40 bytes) */
+struct hwrm_fwd_resp_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_resp_target_id;
+	__le16 encap_resp_cmpl_ring;
+	__le16 encap_resp_len;
+	u8 unused_0;
+	u8 unused_1;
+	__le64 encap_resp_addr;
+	__le32 encap_resp[24];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_resp_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl */
+/* Input (32 bytes) */
+struct hwrm_fwd_async_event_cmpl_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	__le16 encap_async_event_target_id;
+	u8 unused_0;
+	u8 unused_1;
+	u8 unused_2[3];
+	u8 unused_3;
+	__le32 encap_async_event_cmpl[4];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fwd_async_event_cmpl_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	__le32 unused_0;
+	u8 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 valid;
+};
+
+/* hwrm_fw_reset */
+/* Input (24 bytes) */
+struct hwrm_fw_reset_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIMP		   (0x0UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_APE		   (0x1UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_KONG		   (0x2UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BONO		   (0x3UL << 0)
+	#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_TANG		   (0x4UL << 0)
+	u8 selfrst_status;
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST	   (0x2UL << 0)
+	__le16 unused_0[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_reset_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST       (0x2UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+	u8 embedded_proc_type;
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIMP	   (0x0UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_APE		   (0x1UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_KONG		   (0x2UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BONO		   (0x3UL << 0)
+	#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_TANG		   (0x4UL << 0)
+	u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 selfrst_status;
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE	   (0x0UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP	   (0x1UL << 0)
+	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST     (0x2UL << 0)
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+/* hwrm_temp_monitor_query */
+/* Input (16 bytes) */
+struct hwrm_temp_monitor_query_input {
+	__le16 req_type;
+	__le16 cmpl_ring;
+	__le16 seq_id;
+	__le16 target_id;
+	__le64 resp_addr;
+};
+
+/* Output (16 bytes) */
+struct hwrm_temp_monitor_query_output {
+	__le16 error_code;
+	__le16 req_type;
+	__le16 seq_id;
+	__le16 resp_len;
+	u8 temp;
+	u8 unused_0;
+	__le16 unused_1;
+	u8 unused_2;
+	u8 unused_3;
+	u8 unused_4;
+	u8 valid;
+};
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
new file mode 100644
index 0000000..3cf3e1b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -0,0 +1,59 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _BNXT_NVM_DEFS_H_
+#define _BNXT_NVM_DEFS_H_
+
+enum bnxt_nvm_directory_type {
+	BNX_DIR_TYPE_UNUSED = 0,
+	BNX_DIR_TYPE_PKG_LOG = 1,
+	BNX_DIR_TYPE_CHIMP_PATCH = 3,
+	BNX_DIR_TYPE_BOOTCODE = 4,
+	BNX_DIR_TYPE_VPD = 5,
+	BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+	BNX_DIR_TYPE_AVS = 7,
+	BNX_DIR_TYPE_PCIE = 8,
+	BNX_DIR_TYPE_PORT_MACRO = 9,
+	BNX_DIR_TYPE_APE_FW = 10,
+	BNX_DIR_TYPE_APE_PATCH = 11,
+	BNX_DIR_TYPE_KONG_FW = 12,
+	BNX_DIR_TYPE_KONG_PATCH = 13,
+	BNX_DIR_TYPE_BONO_FW = 14,
+	BNX_DIR_TYPE_BONO_PATCH = 15,
+	BNX_DIR_TYPE_TANG_FW = 16,
+	BNX_DIR_TYPE_TANG_PATCH = 17,
+	BNX_DIR_TYPE_BOOTCODE_2 = 18,
+	BNX_DIR_TYPE_CCM = 19,
+	BNX_DIR_TYPE_PCI_CFG = 20,
+	BNX_DIR_TYPE_TSCF_UCODE = 21,
+	BNX_DIR_TYPE_ISCSI_BOOT = 22,
+	BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+	BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+	BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+	BNX_DIR_TYPE_EXT_PHY = 27,
+	BNX_DIR_TYPE_SHARED_CFG = 40,
+	BNX_DIR_TYPE_PORT_CFG = 41,
+	BNX_DIR_TYPE_FUNC_CFG = 42,
+	BNX_DIR_TYPE_MGMT_CFG = 48,
+	BNX_DIR_TYPE_MGMT_DATA = 49,
+	BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+	BNX_DIR_TYPE_MGMT_WEB_META = 51,
+	BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+	BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+#define BNX_DIR_ORDINAL_FIRST			0
+
+#define BNX_DIR_EXT_INACTIVE			(1 << 0)
+#define BNX_DIR_EXT_UPDATE			(1 << 1)
+
+#define BNX_DIR_ATTR_NO_CHKSUM			(1 << 0)
+#define BNX_DIR_ATTR_PROP_STREAM		(1 << 1)
+
+#endif				/* Don't add anything after this line */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
new file mode 100644
index 0000000..60989e7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -0,0 +1,816 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_ethtool.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
+{
+	if (bp->state != BNXT_STATE_OPEN) {
+		netdev_err(bp->dev, "vf ndo called while PF is down\n");
+		return -EINVAL;
+	}
+	if (!bp->pf.active_vfs) {
+		netdev_err(bp->dev, "vf ndo called while sriov is disabled\n");
+		return -EINVAL;
+	}
+	if (vf_id >= bp->pf.max_vfs) {
+		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	bool old_setting = false;
+	u32 func_flags;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+	if (vf->flags & BNXT_VF_SPOOFCHK)
+		old_setting = true;
+	if (old_setting == setting)
+		return 0;
+
+	func_flags = vf->func_flags;
+	if (setting)
+		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+	else
+		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+	/* TODO: if the driver supports VLAN filtering on the guest VLAN,
+	 * the spoof check should also include VLAN anti-spoofing
+	 */
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(func_flags);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		vf->func_flags = func_flags;
+		if (setting)
+			vf->flags |= BNXT_VF_SPOOFCHK;
+		else
+			vf->flags &= ~BNXT_VF_SPOOFCHK;
+	}
+	return rc;
+}
+
+int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+		       struct ifla_vf_info *ivi)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	ivi->vf = vf_id;
+	vf = &bp->pf.vf[vf_id];
+
+	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
+	ivi->max_tx_rate = vf->max_tx_rate;
+	ivi->min_tx_rate = vf->min_tx_rate;
+	ivi->vlan = vf->vlan;
+	ivi->qos = vf->flags & BNXT_VF_QOS;
+	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+	if (!(vf->flags & BNXT_VF_LINK_FORCED))
+		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->flags & BNXT_VF_LINK_UP)
+		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+
+	return 0;
+}
+
+int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+	/* Reject broadcast or multicast MAC addresses; a zero MAC address
+	 * means the VF is allowed to use its own MAC address
+	 */
+	if (is_multicast_ether_addr(mac)) {
+		netdev_err(dev, "Invalid VF ethernet address\n");
+		return -EINVAL;
+	}
+	vf = &bp->pf.vf[vf_id];
+
+	memcpy(vf->mac_addr, mac, ETH_ALEN);
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	u16 vlan_tag;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	/* TODO: proper handling of user priority still needs to be
+	 * implemented; for now, fail the command if a valid priority is set
+	 */
+	if (vlan_id > 4095 || qos)
+		return -EINVAL;
+
+	vf = &bp->pf.vf[vf_id];
+	vlan_tag = vlan_id;
+	if (vlan_tag == vf->vlan)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.dflt_vlan = cpu_to_le16(vlan_tag);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc)
+		vf->vlan = vlan_tag;
+	return rc;
+}
+
+int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
+		   int max_tx_rate)
+{
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	u32 pf_link_speed;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+	if (max_tx_rate > pf_link_speed) {
+		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
+			    max_tx_rate, vf_id);
+		return -EINVAL;
+	}
+
+	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
+			    min_tx_rate, vf_id);
+		return -EINVAL;
+	}
+	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
+		return 0;
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.flags = cpu_to_le32(vf->func_flags);
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+	req.max_bw = cpu_to_le32(max_tx_rate);
+	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+	req.min_bw = cpu_to_le32(min_tx_rate);
+	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		vf->min_tx_rate = min_tx_rate;
+		vf->max_tx_rate = max_tx_rate;
+	}
+	return rc;
+}
+
+int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_vf_ndo_prep(bp, vf_id);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+
+	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
+	switch (link) {
+	case IFLA_VF_LINK_STATE_AUTO:
+		vf->flags |= BNXT_VF_LINK_UP;
+		break;
+	case IFLA_VF_LINK_STATE_DISABLE:
+		vf->flags |= BNXT_VF_LINK_FORCED;
+		break;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
+		break;
+	default:
+		netdev_err(bp->dev, "Invalid link option\n");
+		rc = -EINVAL;
+		break;
+	}
+	/* CHIMP TODO: send msg to VF to update new link state */
+
+	return rc;
+}
+
+static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
+{
+	int i;
+	struct bnxt_vf_info *vf;
+
+	for (i = 0; i < num_vfs; i++) {
+		vf = &bp->pf.vf[i];
+		memset(vf, 0, sizeof(*vf));
+		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
+	}
+	return 0;
+}
+
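+/* Tell the firmware to release the resources it reserved for each active
+ * VF (HWRM_FUNC_VF_RESC_FREE), issuing one command per VF id under the
+ * HWRM command lock.
+ */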
+static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp)
+{
+	int i, rc = 0;
+	struct bnxt_pf_info *pf = &bp->pf;
+	struct hwrm_func_vf_resc_free_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = pf->first_vf_id; i < pf->first_vf_id + pf->active_vfs; i++) {
+		req.vf_id = cpu_to_le16(i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
+static void bnxt_free_vf_resources(struct bnxt *bp)
+{
+	struct pci_dev *pdev = bp->pdev;
+	int i;
+
+	kfree(bp->pf.vf_event_bmap);
+	bp->pf.vf_event_bmap = NULL;
+
+	for (i = 0; i < 4; i++) {
+		if (bp->pf.hwrm_cmd_req_addr[i]) {
+			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+					  bp->pf.hwrm_cmd_req_addr[i],
+					  bp->pf.hwrm_cmd_req_dma_addr[i]);
+			bp->pf.hwrm_cmd_req_addr[i] = NULL;
+		}
+	}
+
+	kfree(bp->pf.vf);
+	bp->pf.vf = NULL;
+}
+
+static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	struct pci_dev *pdev = bp->pdev;
+	u32 nr_pages, size, i, j, k = 0;
+
+	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
+	if (!bp->pf.vf)
+		return -ENOMEM;
+
+	bnxt_set_vf_attr(bp, num_vfs);
+
+	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
+	nr_pages = size / BNXT_PAGE_SIZE;
+	if (size & (BNXT_PAGE_SIZE - 1))
+		nr_pages++;
+
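+	/* Each VF gets one fixed-size request slot of BNXT_HWRM_REQ_MAX_SIZE
+	 * bytes; slots are packed BNXT_HWRM_REQS_PER_PAGE to a DMA-coherent
+	 * page, so e.g. 4K pages with 128-byte slots back 32 VFs per page.
+	 */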
+	for (i = 0; i < nr_pages; i++) {
+		bp->pf.hwrm_cmd_req_addr[i] =
+			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
+					   &bp->pf.hwrm_cmd_req_dma_addr[i],
+					   GFP_KERNEL);
+
+		if (!bp->pf.hwrm_cmd_req_addr[i])
+			return -ENOMEM;
+
+		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
+			struct bnxt_vf_info *vf = &bp->pf.vf[k];
+
+			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
+						j * BNXT_HWRM_REQ_MAX_SIZE;
+			vf->hwrm_cmd_req_dma_addr =
+				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
+				BNXT_HWRM_REQ_MAX_SIZE;
+			k++;
+		}
+	}
+
+	/* Max 128 VFs: the 16-byte bitmap provides 16 * 8 = 128 event bits */
+	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
+	if (!bp->pf.vf_event_bmap)
+		return -ENOMEM;
+
+	bp->pf.hwrm_cmd_req_pages = nr_pages;
+	return 0;
+}
+
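+/* Register the PF pages backing the per-VF request slots with the firmware
+ * (HWRM_FUNC_BUF_RGTR) so VF commands can be deposited there for the PF to
+ * process.
+ */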
+static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_buf_rgtr_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+
+	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+/* Only called by the PF to reserve resources for VFs */
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+{
+	u32 rc = 0, mtu, i;
+	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+	struct hwrm_func_cfg_input req = {0};
+	struct bnxt_pf_info *pf = &bp->pf;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
+	/* Remaining rings are distributed equally among VFs for now */
+	/* TODO: the following workaround is needed to restrict the total
+	 * number of vf_cp_rings so it does not exceed the number of HW ring
+	 * groups. This WA should be removed once the new HWRM provides the
+	 * HW ring groups capability in hwrm_func_qcap.
+	 */
+	vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
+	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+	/* TODO: restore this logic below once the WA above is removed */
+	/* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
+	vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
+			      *num_vfs;
+	else
+		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
+			      *num_vfs;
+	vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
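+	/* Illustrative example: with max_tx_rings = 64, tx_nr_rings = 8 and
+	 * 8 VFs, each VF is given (64 - 8) / 8 = 7 TX rings.
+	 */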
+
+	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+				  FUNC_CFG_REQ_ENABLES_MRU |
+				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+				  FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+
+	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	req.mru = cpu_to_le16(mtu);
+	req.mtu = cpu_to_le16(mtu);
+
+	req.num_rsscos_ctxs = cpu_to_le16(1);
+	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
+	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.num_l2_ctxs = cpu_to_le16(4);
+	vf_vnics = 1;
+
+	req.num_vnics = cpu_to_le16(vf_vnics);
+	/* FIXME spec currently uses 1 bit for stats ctx */
+	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	for (i = 0; i < *num_vfs; i++) {
+		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+		rc = _hwrm_send_message(bp, &req, sizeof(req),
+					HWRM_CMD_TIMEOUT);
+		if (rc)
+			break;
+		bp->pf.active_vfs = i + 1;
+		bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (!rc) {
+		bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
+		if (bp->flags & BNXT_FLAG_AGG_RINGS)
+			bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
+		else
+			bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+	}
+	return rc;
+}
+
+static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+{
+	int rc = 0, vfs_supported;
+	int min_rx_rings, min_tx_rings, min_rss_ctxs;
+	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+
+	/* Check if we can enable the requested number of VFs. At a minimum
+	 * we require 1 RX and 1 TX ring for each VF. In this minimal
+	 * configuration, features like TPA will not be available.
+	 */
+	vfs_supported = *num_vfs;
+
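+	/* Walk down from the requested VF count until the rings and RSS
+	 * contexts left over by the PF can cover one of each per VF.
+	 */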
+	while (vfs_supported) {
+		min_rx_rings = vfs_supported;
+		min_tx_rings = vfs_supported;
+		min_rss_ctxs = vfs_supported;
+
+		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
+			    min_rx_rings)
+				rx_ok = 1;
+		} else {
+			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
+			    min_rx_rings)
+				rx_ok = 1;
+		}
+
+		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+			tx_ok = 1;
+
+		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
+			rss_ok = 1;
+
+		if (tx_ok && rx_ok && rss_ok)
+			break;
+
+		vfs_supported--;
+	}
+
+	if (!vfs_supported) {
+		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
+		return -EINVAL;
+	}
+
+	if (vfs_supported != *num_vfs) {
+		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
+			    *num_vfs, vfs_supported);
+		*num_vfs = vfs_supported;
+	}
+
+	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
+	if (rc)
+		goto err_out1;
+
+	/* Reserve resources for VFs */
+	rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+	if (rc)
+		goto err_out2;
+
+	/* Register buffers for VFs */
+	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	if (rc)
+		goto err_out2;
+
+	rc = pci_enable_sriov(bp->pdev, *num_vfs);
+	if (rc)
+		goto err_out2;
+
+	return 0;
+
+err_out2:
+	/* Free the resources reserved for the various VFs */
+	bnxt_hwrm_func_vf_resource_free(bp);
+
+err_out1:
+	bnxt_free_vf_resources(bp);
+
+	return rc;
+}
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+	if (!bp->pf.active_vfs)
+		return;
+
+	pci_disable_sriov(bp->pdev);
+
+	/* Free the resources reserved for the various VFs */
+	bnxt_hwrm_func_vf_resource_free(bp);
+
+	bnxt_free_vf_resources(bp);
+
+	bp->pf.active_vfs = 0;
+	bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
+	bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+}
+
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+		netdev_warn(dev, "SRIOV is not allowed if the irq mode is not MSIX\n");
+		return 0;
+	}
+
+	rtnl_lock();
+	if (!netif_running(dev)) {
+		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down\n");
+		rtnl_unlock();
+		return 0;
+	}
+	bp->sriov_cfg = true;
+	rtnl_unlock();
+	if (!num_vfs) {
+		bnxt_sriov_disable(bp);
+		return 0;
+	}
+
+	/* Check if the number of enabled VFs is the same as requested */
+	if (num_vfs == bp->pf.active_vfs)
+		return 0;
+
+	bnxt_sriov_enable(bp, &num_vfs);
+
+	bp->sriov_cfg = false;
+	wake_up(&bp->sriov_cfg_wait);
+
+	return num_vfs;
+}
+
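+/* Forward a response that the PF prepared on behalf of a VF back to the
+ * VF, targeting the VF's response address and completion ring via
+ * HWRM_FWD_RESP.
+ */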
+static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+			      void *encap_resp, __le64 encap_resp_addr,
+			      __le16 encap_resp_cpr, u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_fwd_resp_input req = {0};
+	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	req.encap_resp_len = cpu_to_le16(msg_size);
+	req.encap_resp_addr = encap_resp_addr;
+	req.encap_resp_cmpl_ring = encap_resp_cpr;
+	memcpy(req.encap_resp, encap_resp, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
+		goto fwd_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
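+/* Reject a VF's encapsulated request by bouncing it back to the VF via
+ * HWRM_REJECT_FWD_RESP.
+ */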
+static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+				  u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_reject_fwd_resp_input req = {0};
+	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
+		goto fwd_err_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+fwd_err_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
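+/* Execute a VF's encapsulated request on its behalf via
+ * HWRM_EXEC_FWD_RESP.
+ */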
+static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
+				   u32 msg_size)
+{
+	int rc = 0;
+	struct hwrm_exec_fwd_resp_input req = {0};
+	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+	/* Set the new target id */
+	req.target_id = cpu_to_le16(vf->fw_fid);
+	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	if (rc) {
+		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
+		goto exec_fwd_resp_exit;
+	}
+
+	if (resp->error_code) {
+		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
+			   resp->error_code);
+		rc = -1;
+	}
+
+exec_fwd_resp_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
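+/* A VF may only create an L2 filter for its assigned MAC: execute the
+ * request if the PF has not assigned a valid MAC or the requested address
+ * matches the assigned one; otherwise reject it.
+ */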
+static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+	struct hwrm_cfa_l2_filter_alloc_input *req =
+		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+
+	if (!is_valid_ether_addr(vf->mac_addr) ||
+	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+	else
+		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+}
+
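+/* Answer a VF's HWRM_PORT_PHY_QCFG query: forward it to the firmware for
+ * the real link state, unless the PF has administratively forced the VF
+ * link, in which case a synthesized response is returned.
+ */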
+static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	int rc = 0;
+
+	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
+		/* real link */
+		rc = bnxt_hwrm_exec_fwd_resp(
+			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
+	} else {
+		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
+
+		phy_qcfg_req =
+		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
+		mutex_lock(&bp->hwrm_cmd_lock);
+		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+		       sizeof(phy_qcfg_resp));
+		mutex_unlock(&bp->hwrm_cmd_lock);
+		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+
+		if (vf->flags & BNXT_VF_LINK_UP) {
+			/* if physical link is down, force link up on VF */
+			if (phy_qcfg_resp.link ==
+			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
+				phy_qcfg_resp.link =
+					PORT_PHY_QCFG_RESP_LINK_LINK;
+				if (phy_qcfg_resp.auto_link_speed)
+					phy_qcfg_resp.link_speed =
+						phy_qcfg_resp.auto_link_speed;
+				else
+					phy_qcfg_resp.link_speed =
+						phy_qcfg_resp.force_link_speed;
+				phy_qcfg_resp.duplex =
+					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+				phy_qcfg_resp.pause =
+					(PORT_PHY_QCFG_RESP_PAUSE_TX |
+					 PORT_PHY_QCFG_RESP_PAUSE_RX);
+			}
+		} else {
+			/* force link down */
+			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
+			phy_qcfg_resp.link_speed = 0;
+			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+			phy_qcfg_resp.pause = 0;
+		}
+		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
+					phy_qcfg_req->resp_addr,
+					phy_qcfg_req->cmpl_ring,
+					sizeof(phy_qcfg_resp));
+	}
+	return rc;
+}
+
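+/* Validate a forwarded VF request: depending on the encapsulated HWRM
+ * request type, execute it, answer it locally, or reject it.
+ */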
+static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	int rc = 0;
+	struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
+	u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;
+
+	switch (req_type) {
+	case HWRM_CFA_L2_FILTER_ALLOC:
+		rc = bnxt_vf_validate_set_mac(bp, vf);
+		break;
+	case HWRM_FUNC_CFG:
+		/* TODO Validate if VF is allowed to change mac address,
+		 * mtu, num of rings etc
+		 */
+		rc = bnxt_hwrm_exec_fwd_resp(
+			bp, vf, sizeof(struct hwrm_func_cfg_input));
+		break;
+	case HWRM_PORT_PHY_QCFG:
+		rc = bnxt_vf_set_link(bp, vf);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
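+/* Service pending VF->PF commands: a VF that posted a request has its bit
+ * set in vf_event_bmap; scan the bitmap, clear each bit and dispatch the
+ * encapsulated request for validation.
+ */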
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;
+
+	/* Scan through VFs and process pending commands */
+	while (1) {
+		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
+		if (vf_id >= active_vfs)
+			break;
+
+		clear_bit(vf_id, bp->pf.vf_event_bmap);
+		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
+		i = vf_id + 1;
+	}
+}
+
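+/* On a VF, query the firmware (HWRM_FUNC_QCAPS) for a PF-assigned
+ * permanent MAC address and adopt it as the netdev address when it
+ * differs from the current one.
+ */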
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+	struct hwrm_func_qcaps_input req = {0};
+	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+	req.fid = cpu_to_le16(0xffff);
+
+	mutex_lock(&bp->hwrm_cmd_lock);
+	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+		goto update_vf_mac_exit;
+
+	if (!is_valid_ether_addr(resp->perm_mac_address))
+		goto update_vf_mac_exit;
+
+	if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
+		goto update_vf_mac_exit;
+
+	memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+update_vf_mac_exit:
+	mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+#else
+
+void bnxt_sriov_disable(struct bnxt *bp)
+{
+}
+
+void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
+{
+	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
+}
+
+void bnxt_update_vf_mac(struct bnxt *bp)
+{
+}
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
new file mode 100644
index 0000000..c151280
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -0,0 +1,23 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_SRIOV_H
+#define BNXT_SRIOV_H
+
+int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_bw(struct net_device *, int, int, int);
+int bnxt_set_vf_link_state(struct net_device *, int, int);
+int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void bnxt_sriov_disable(struct bnxt *);
+void bnxt_hwrm_exec_fwd_req(struct bnxt *);
+void bnxt_update_vf_mac(struct bnxt *);
+#endif
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 1a3988f..50f63b7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -793,7 +793,6 @@
 {
 	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
 	strlcpy(info->version, "v2.0", sizeof(info->version));
-	info->n_stats = BCMGENET_STATS_LEN;
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1832,6 +1831,24 @@
 	bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
 }
 
+static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+{
+	u32 int0_enable = 0;
+
+	/* Monitor cable plug/unplug events for the internal PHY, external PHY
+	 * and MoCA PHY
+	 */
+	if (priv->internal_phy) {
+		int0_enable |= UMAC_IRQ_LINK_EVENT;
+	} else if (priv->ext_phy) {
+		int0_enable |= UMAC_IRQ_LINK_EVENT;
+	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+			int0_enable |= UMAC_IRQ_LINK_EVENT;
+	}
+	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+}
+
 static int init_umac(struct bcmgenet_priv *priv)
 {
 	struct device *kdev = &priv->pdev->dev;
@@ -1872,15 +1889,8 @@
 	/* Enable Tx default queue 16 interrupts */
 	int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
-	/* Monitor cable plug/unplugged event for internal PHY */
-	if (priv->internal_phy) {
-		int0_enable |= UMAC_IRQ_LINK_EVENT;
-	} else if (priv->ext_phy) {
-		int0_enable |= UMAC_IRQ_LINK_EVENT;
-	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
-		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-			int0_enable |= UMAC_IRQ_LINK_EVENT;
-
+	/* Configure backpressure vectors for MoCA */
+	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
 		reg = bcmgenet_bp_mc_get(priv);
 		reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -2794,6 +2804,9 @@
 
 	netif_tx_start_all_queues(dev);
 
+	/* Monitor link interrupts now */
+	bcmgenet_link_intr_enable(priv);
+
 	phy_start(priv->phydev);
 }
 
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f78..9e59663 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@
 }
 
 /* Flush FLI data fifo. */
-static u32
+static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
 	u32 i;
@@ -1573,11 +1573,11 @@
 }
 
 /* Read flash status. */
-static u32
+static int
 bfa_flash_status_read(void __iomem *pci_bar)
 {
 	union bfa_flash_dev_status_reg	dev_status;
-	u32				status;
+	int				status;
 	u32			ret_status;
 	int				i;
 
@@ -1611,11 +1611,11 @@
 }
 
 /* Start flash read operation. */
-static u32
+static int
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
 		     char *buf)
 {
-	u32 status;
+	int status;
 
 	/* len must be multiple of 4 and not exceeding fifo size */
 	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
 		   u32 len)
 {
-	u32 n, status;
+	u32 n;
+	int status;
 	u32 off, l, s, residue, fifo_sz;
 
 	residue = len;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 9b35d14..8fb84e6 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -3,7 +3,7 @@
 #
 
 config NET_VENDOR_CAVIUM
-	tristate "Cavium ethernet drivers"
+	bool "Cavium ethernet drivers"
 	depends on PCI
 	default y
 	---help---
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 29f3308..245c063 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -153,7 +153,6 @@
 	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
 		ETHTOOL_FWVERS_LEN);
 	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
-	drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN;
 }
 
 static void
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0a87a32..4269944 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -940,6 +940,7 @@
 
 static const char * const devlog_facility_strings[] = {
 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
+	[FW_DEVLOG_FACILITY_CF]         = "CF",
 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
 	[FW_DEVLOG_FACILITY_RES]	= "RES",
@@ -1128,18 +1129,26 @@
 static int mbox_show(struct seq_file *seq, void *v)
 {
 	static const char * const owner[] = { "none", "FW", "driver",
-					      "unknown" };
+					      "unknown", "<unread>" };
 
 	int i;
 	unsigned int mbox = (uintptr_t)seq->private & 7;
 	struct adapter *adap = seq->private - mbox;
 	void __iomem *addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
-	unsigned int ctrl_reg = (is_t4(adap->params.chip)
-				 ? CIM_PF_MAILBOX_CTRL_A
-				 : CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A);
-	void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
 
-	i = MBOWNER_G(readl(ctrl));
+	/* For T4 we don't have a shadow copy of the Mailbox Control register.
+	 * And since reading that real register causes a side effect of
+	 * granting ownership, we're best off simply not reading it at all.
+	 */
+	if (is_t4(adap->params.chip)) {
+		i = 4; /* index of "<unread>" */
+	} else {
+		unsigned int ctrl_reg = CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A;
+		void __iomem *ctrl = adap->regs + PF_REG(mbox, ctrl_reg);
+
+		i = MBOWNER_G(readl(ctrl));
+	}
+
 	seq_printf(seq, "mailbox owned by %s\n\n", owner[i]);
 
 	for (i = 0; i < MBOX_LEN; i += 8)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 02e4e02..a077f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -35,79 +35,79 @@
 }
 
 static const char stats_strings[][ETH_GSTRING_LEN] = {
-	"TxOctetsOK         ",
-	"TxFramesOK         ",
-	"TxBroadcastFrames  ",
-	"TxMulticastFrames  ",
-	"TxUnicastFrames    ",
-	"TxErrorFrames      ",
+	"tx_octets_ok		",
+	"tx_frames_ok		",
+	"tx_broadcast_frames	",
+	"tx_multicast_frames	",
+	"tx_unicast_frames	",
+	"tx_error_frames	",
 
-	"TxFrames64         ",
-	"TxFrames65To127    ",
-	"TxFrames128To255   ",
-	"TxFrames256To511   ",
-	"TxFrames512To1023  ",
-	"TxFrames1024To1518 ",
-	"TxFrames1519ToMax  ",
+	"tx_frames_64		",
+	"tx_frames_65_to_127	",
+	"tx_frames_128_to_255	",
+	"tx_frames_256_to_511	",
+	"tx_frames_512_to_1023	",
+	"tx_frames_1024_to_1518	",
+	"tx_frames_1519_to_max	",
 
-	"TxFramesDropped    ",
-	"TxPauseFrames      ",
-	"TxPPP0Frames       ",
-	"TxPPP1Frames       ",
-	"TxPPP2Frames       ",
-	"TxPPP3Frames       ",
-	"TxPPP4Frames       ",
-	"TxPPP5Frames       ",
-	"TxPPP6Frames       ",
-	"TxPPP7Frames       ",
+	"tx_frames_dropped	",
+	"tx_pause_frames	",
+	"tx_ppp0_frames		",
+	"tx_ppp1_frames		",
+	"tx_ppp2_frames		",
+	"tx_ppp3_frames		",
+	"tx_ppp4_frames		",
+	"tx_ppp5_frames		",
+	"tx_ppp6_frames		",
+	"tx_ppp7_frames		",
 
-	"RxOctetsOK         ",
-	"RxFramesOK         ",
-	"RxBroadcastFrames  ",
-	"RxMulticastFrames  ",
-	"RxUnicastFrames    ",
+	"rx_octets_ok		",
+	"rx_frames_ok		",
+	"rx_broadcast_frames	",
+	"rx_multicast_frames	",
+	"rx_unicast_frames	",
 
-	"RxFramesTooLong    ",
-	"RxJabberErrors     ",
-	"RxFCSErrors        ",
-	"RxLengthErrors     ",
-	"RxSymbolErrors     ",
-	"RxRuntFrames       ",
+	"rx_frames_too_long	",
+	"rx_jabber_errors	",
+	"rx_fcs_errors		",
+	"rx_length_errors	",
+	"rx_symbol_errors	",
+	"rx_runt_frames		",
 
-	"RxFrames64         ",
-	"RxFrames65To127    ",
-	"RxFrames128To255   ",
-	"RxFrames256To511   ",
-	"RxFrames512To1023  ",
-	"RxFrames1024To1518 ",
-	"RxFrames1519ToMax  ",
+	"rx_frames_64		",
+	"rx_frames_65_to_127	",
+	"rx_frames_128_to_255	",
+	"rx_frames_256_to_511	",
+	"rx_frames_512_to_1023	",
+	"rx_frames_1024_to_1518	",
+	"rx_frames_1519_to_max	",
 
-	"RxPauseFrames      ",
-	"RxPPP0Frames       ",
-	"RxPPP1Frames       ",
-	"RxPPP2Frames       ",
-	"RxPPP3Frames       ",
-	"RxPPP4Frames       ",
-	"RxPPP5Frames       ",
-	"RxPPP6Frames       ",
-	"RxPPP7Frames       ",
+	"rx_pause_frames	",
+	"rx_ppp0_frames		",
+	"rx_ppp1_frames		",
+	"rx_ppp2_frames		",
+	"rx_ppp3_frames		",
+	"rx_ppp4_frames		",
+	"rx_ppp5_frames		",
+	"rx_ppp6_frames		",
+	"rx_ppp7_frames		",
 
-	"RxBG0FramesDropped ",
-	"RxBG1FramesDropped ",
-	"RxBG2FramesDropped ",
-	"RxBG3FramesDropped ",
-	"RxBG0FramesTrunc   ",
-	"RxBG1FramesTrunc   ",
-	"RxBG2FramesTrunc   ",
-	"RxBG3FramesTrunc   ",
+	"rx_bg0_frames_dropped	",
+	"rx_bg1_frames_dropped	",
+	"rx_bg2_frames_dropped	",
+	"rx_bg3_frames_dropped	",
+	"rx_bg0_frames_trunc	",
+	"rx_bg1_frames_trunc	",
+	"rx_bg2_frames_trunc	",
+	"rx_bg3_frames_trunc	",
 
-	"TSO                ",
-	"TxCsumOffload      ",
-	"RxCsumGood         ",
-	"VLANextractions    ",
-	"VLANinsertions     ",
-	"GROpackets         ",
-	"GROmerged          ",
+	"tso			",
+	"tx_csum_offload	",
+	"rx_csum_good		",
+	"vlan_extractions	",
+	"vlan_insertions	",
+	"gro_packets		",
+	"gro_merged		",
 };
 
 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
@@ -211,8 +211,11 @@
 		sizeof(info->version));
 	strlcpy(info->bus_info, pci_name(adapter->pdev),
 		sizeof(info->bus_info));
+	info->regdump_len = get_regs_len(dev);
 
-	if (adapter->params.fw_vers)
+	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+		strcpy(info->fw_version, "N/A");
+	else
 		snprintf(info->fw_version, sizeof(info->fw_version),
 			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
 			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
@@ -612,6 +615,8 @@
 	struct port_info *p = netdev_priv(dev);
 	struct link_config *lc = &p->link_cfg;
 	u32 speed = ethtool_cmd_speed(cmd);
+	struct link_config old_lc;
+	int ret;
 
 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
 		return -EINVAL;
@@ -626,13 +631,11 @@
 		return -EINVAL;
 	}
 
+	old_lc = *lc;
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		cap = speed_to_caps(speed);
 
-		if (!(lc->supported & cap) ||
-		    (speed == 1000) ||
-		    (speed == 10000) ||
-		    (speed == 40000))
+		if (!(lc->supported & cap))
 			return -EINVAL;
 		lc->requested_speed = cap;
 		lc->advertising = 0;
@@ -645,10 +648,14 @@
 	}
 	lc->autoneg = cmd->autoneg;
 
-	if (netif_running(dev))
-		return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
-				     lc);
-	return 0;
+	/* If the firmware rejects the Link Configuration request, back out
+	 * the changes and report the error.
+	 */
+	ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+	if (ret)
+		*lc = old_lc;
+
+	return ret;
 }
 
 static void get_pauseparam(struct net_device *dev,
@@ -847,7 +854,7 @@
 {
 	int i, err = 0;
 	struct adapter *adapter = netdev2adap(dev);
-	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+	u8 *buf = t4_alloc_mem(EEPROMSIZE);
 
 	if (!buf)
 		return -ENOMEM;
@@ -858,7 +865,7 @@
 
 	if (!err)
 		memcpy(data, buf + e->offset, e->len);
-	kfree(buf);
+	t4_free_mem(buf);
 	return err;
 }
 
@@ -887,7 +894,7 @@
 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
 		/* RMW possibly needed for first or last words.
 		 */
-		buf = kmalloc(aligned_len, GFP_KERNEL);
+		buf = t4_alloc_mem(aligned_len);
 		if (!buf)
 			return -ENOMEM;
 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
@@ -915,7 +922,7 @@
 		err = t4_seeprom_wp(adapter, true);
 out:
 	if (buf != data)
-		kfree(buf);
+		t4_free_mem(buf);
 	return err;
 }
 
@@ -1011,11 +1018,15 @@
 	if (!p)
 		return 0;
 
-	for (i = 0; i < pi->rss_size; i++)
-		pi->rss[i] = p[i];
-	if (pi->adapter->flags & FULL_INIT_DONE)
+	/* Interface must be brought up at least once */
+	if (pi->adapter->flags & FULL_INIT_DONE) {
+		for (i = 0; i < pi->rss_size; i++)
+			pi->rss[i] = p[i];
+
 		return cxgb4_write_rss(pi, pi->rss);
-	return 0;
+	}
+
+	return -EPERM;
 }
 
 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0b7570d..2cf8185 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -83,7 +83,7 @@
 #endif
 #define DRV_VERSION "2.0.0-ko"
 const char cxgb4_driver_version[] = DRV_VERSION;
-#define DRV_DESC "Chelsio T4/T5 Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
 
 /* Host shadow copy of ingress filter entry.  This is in host native format
  * and doesn't match the ordering or bit order, etc. of the hardware of the
@@ -151,6 +151,7 @@
 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 MODULE_FIRMWARE(FW4_FNAME);
 MODULE_FIRMWARE(FW5_FNAME);
+MODULE_FIRMWARE(FW6_FNAME);
 
 /*
  * Normally we're willing to become the firmware's Master PF but will be happy
@@ -275,7 +276,7 @@
 	else {
 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
 
-		const char *s = "10Mbps";
+		const char *s;
 		const struct port_info *p = netdev_priv(dev);
 
 		switch (p->link_cfg.speed) {
@@ -291,6 +292,10 @@
 		case 40000:
 			s = "40Gbps";
 			break;
+		default:
+			pr_info("%s: unsupported speed: %d\n",
+				dev->name, p->link_cfg.speed);
+			return;
 		}
 
 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -3694,7 +3699,7 @@
 	t4_get_tp_version(adap, &adap->params.tp_vers);
 	ret = t4_check_fw_version(adap);
 	/* If firmware is too old (not supported by driver) force an update. */
-	if (ret == -EFAULT)
+	if (ret)
 		state = DEV_STATE_UNINIT;
 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
 		struct fw_info *fw_info;
@@ -4481,6 +4486,10 @@
 	}
 	for (i = 0; i < allocated; ++i)
 		adap->msix_info[i].vec = entries[i].vector;
+	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
+		 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
+		 allocated, s->max_ethqsets, s->ofldqsets, s->rdmaqs,
+		 s->rdmaciqs);
 
 	kfree(entries);
 	return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4480625..cf61a58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -699,50 +699,107 @@
 {
 	static const unsigned int t4_reg_ranges[] = {
 		0x1008, 0x1108,
-		0x1180, 0x11b4,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x123c,
 		0x1300, 0x173c,
 		0x1800, 0x18fc,
-		0x3000, 0x305c,
-		0x3068, 0x30d8,
-		0x30e0, 0x5924,
-		0x5960, 0x59d4,
-		0x5a00, 0x5af8,
+		0x3000, 0x30d8,
+		0x30e0, 0x30e4,
+		0x30ec, 0x5910,
+		0x5920, 0x5924,
+		0x5960, 0x5960,
+		0x5968, 0x5968,
+		0x5970, 0x5970,
+		0x5978, 0x5978,
+		0x5980, 0x5980,
+		0x5988, 0x5988,
+		0x5990, 0x5990,
+		0x5998, 0x5998,
+		0x59a0, 0x59d4,
+		0x5a00, 0x5ae0,
+		0x5ae8, 0x5ae8,
+		0x5af0, 0x5af0,
+		0x5af8, 0x5af8,
 		0x6000, 0x6098,
 		0x6100, 0x6150,
 		0x6200, 0x6208,
 		0x6240, 0x6248,
-		0x6280, 0x6338,
+		0x6280, 0x62b0,
+		0x62c0, 0x6338,
 		0x6370, 0x638c,
 		0x6400, 0x643c,
 		0x6500, 0x6524,
-		0x6a00, 0x6a38,
-		0x6a60, 0x6a78,
-		0x6b00, 0x6b84,
-		0x6bf0, 0x6c84,
-		0x6cf0, 0x6d84,
-		0x6df0, 0x6e84,
-		0x6ef0, 0x6f84,
-		0x6ff0, 0x7084,
-		0x70f0, 0x7184,
-		0x71f0, 0x7284,
-		0x72f0, 0x7384,
-		0x73f0, 0x7450,
+		0x6a00, 0x6a04,
+		0x6a14, 0x6a38,
+		0x6a60, 0x6a70,
+		0x6a78, 0x6a78,
+		0x6b00, 0x6b0c,
+		0x6b1c, 0x6b84,
+		0x6bf0, 0x6bf8,
+		0x6c00, 0x6c0c,
+		0x6c1c, 0x6c84,
+		0x6cf0, 0x6cf8,
+		0x6d00, 0x6d0c,
+		0x6d1c, 0x6d84,
+		0x6df0, 0x6df8,
+		0x6e00, 0x6e0c,
+		0x6e1c, 0x6e84,
+		0x6ef0, 0x6ef8,
+		0x6f00, 0x6f0c,
+		0x6f1c, 0x6f84,
+		0x6ff0, 0x6ff8,
+		0x7000, 0x700c,
+		0x701c, 0x7084,
+		0x70f0, 0x70f8,
+		0x7100, 0x710c,
+		0x711c, 0x7184,
+		0x71f0, 0x71f8,
+		0x7200, 0x720c,
+		0x721c, 0x7284,
+		0x72f0, 0x72f8,
+		0x7300, 0x730c,
+		0x731c, 0x7384,
+		0x73f0, 0x73f8,
+		0x7400, 0x7450,
 		0x7500, 0x7530,
-		0x7600, 0x761c,
+		0x7600, 0x760c,
+		0x7614, 0x761c,
 		0x7680, 0x76cc,
 		0x7700, 0x7798,
 		0x77c0, 0x77fc,
 		0x7900, 0x79fc,
-		0x7b00, 0x7c38,
-		0x7d00, 0x7efc,
-		0x8dc0, 0x8e1c,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c38,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7ea4,
+		0x7eac, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8e04,
+		0x8e10, 0x8e1c,
 		0x8e30, 0x8e78,
-		0x8ea0, 0x8f6c,
-		0x8fc0, 0x9074,
+		0x8ea0, 0x8eb8,
+		0x8ec0, 0x8f6c,
+		0x8fc0, 0x9008,
+		0x9010, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x9074,
 		0x90fc, 0x90fc,
-		0x9400, 0x9458,
-		0x9600, 0x96bc,
+		0x9400, 0x9408,
+		0x9410, 0x9458,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96bc,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -754,23 +811,42 @@
 		0x9e80, 0x9eec,
 		0x9f00, 0x9f6c,
 		0x9f80, 0x9fec,
-		0xd004, 0xd03c,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xea7c,
-		0xf000, 0x11110,
-		0x11118, 0x11190,
+		0xf000, 0x11190,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e4,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
 		0x19238, 0x1924c,
-		0x193f8, 0x19474,
-		0x19490, 0x194f8,
-		0x19800, 0x19f4c,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x193f8, 0x1943c,
+		0x1944c, 0x19474,
+		0x19490, 0x194e0,
+		0x194f0, 0x194f8,
+		0x19800, 0x19c08,
+		0x19c10, 0x19c90,
+		0x19ca0, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e40,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f4c,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f4,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e040, 0x1e04c,
@@ -823,9 +899,12 @@
 		0x1ffc0, 0x1ffc8,
 		0x20000, 0x2002c,
 		0x20100, 0x2013c,
-		0x20190, 0x201c8,
+		0x20190, 0x201a0,
+		0x201a8, 0x201b8,
+		0x201c4, 0x201c8,
 		0x20200, 0x20318,
-		0x20400, 0x20528,
+		0x20400, 0x204b4,
+		0x204c0, 0x20528,
 		0x20540, 0x20614,
 		0x21000, 0x21040,
 		0x2104c, 0x21060,
@@ -834,22 +913,62 @@
 		0x21270, 0x21284,
 		0x212fc, 0x21388,
 		0x21400, 0x21404,
-		0x21500, 0x21518,
-		0x2152c, 0x2153c,
+		0x21500, 0x21500,
+		0x21510, 0x21518,
+		0x2152c, 0x21530,
+		0x2153c, 0x2153c,
 		0x21550, 0x21554,
 		0x21600, 0x21600,
-		0x21608, 0x21628,
-		0x21630, 0x2163c,
+		0x21608, 0x2161c,
+		0x21624, 0x21628,
+		0x21630, 0x21634,
+		0x2163c, 0x2163c,
 		0x21700, 0x2171c,
 		0x21780, 0x2178c,
-		0x21800, 0x21c38,
-		0x21c80, 0x21d7c,
+		0x21800, 0x21818,
+		0x21820, 0x21828,
+		0x21830, 0x21848,
+		0x21850, 0x21854,
+		0x21860, 0x21868,
+		0x21870, 0x21870,
+		0x21878, 0x21898,
+		0x218a0, 0x218a8,
+		0x218b0, 0x218c8,
+		0x218d0, 0x218d4,
+		0x218e0, 0x218e8,
+		0x218f0, 0x218f0,
+		0x218f8, 0x21a18,
+		0x21a20, 0x21a28,
+		0x21a30, 0x21a48,
+		0x21a50, 0x21a54,
+		0x21a60, 0x21a68,
+		0x21a70, 0x21a70,
+		0x21a78, 0x21a98,
+		0x21aa0, 0x21aa8,
+		0x21ab0, 0x21ac8,
+		0x21ad0, 0x21ad4,
+		0x21ae0, 0x21ae8,
+		0x21af0, 0x21af0,
+		0x21af8, 0x21c18,
+		0x21c20, 0x21c20,
+		0x21c28, 0x21c30,
+		0x21c38, 0x21c38,
+		0x21c80, 0x21c98,
+		0x21ca0, 0x21ca8,
+		0x21cb0, 0x21cc8,
+		0x21cd0, 0x21cd4,
+		0x21ce0, 0x21ce8,
+		0x21cf0, 0x21cf0,
+		0x21cf8, 0x21d7c,
 		0x21e00, 0x21e04,
 		0x22000, 0x2202c,
 		0x22100, 0x2213c,
-		0x22190, 0x221c8,
+		0x22190, 0x221a0,
+		0x221a8, 0x221b8,
+		0x221c4, 0x221c8,
 		0x22200, 0x22318,
-		0x22400, 0x22528,
+		0x22400, 0x224b4,
+		0x224c0, 0x22528,
 		0x22540, 0x22614,
 		0x23000, 0x23040,
 		0x2304c, 0x23060,
@@ -858,22 +977,62 @@
 		0x23270, 0x23284,
 		0x232fc, 0x23388,
 		0x23400, 0x23404,
-		0x23500, 0x23518,
-		0x2352c, 0x2353c,
+		0x23500, 0x23500,
+		0x23510, 0x23518,
+		0x2352c, 0x23530,
+		0x2353c, 0x2353c,
 		0x23550, 0x23554,
 		0x23600, 0x23600,
-		0x23608, 0x23628,
-		0x23630, 0x2363c,
+		0x23608, 0x2361c,
+		0x23624, 0x23628,
+		0x23630, 0x23634,
+		0x2363c, 0x2363c,
 		0x23700, 0x2371c,
 		0x23780, 0x2378c,
-		0x23800, 0x23c38,
-		0x23c80, 0x23d7c,
+		0x23800, 0x23818,
+		0x23820, 0x23828,
+		0x23830, 0x23848,
+		0x23850, 0x23854,
+		0x23860, 0x23868,
+		0x23870, 0x23870,
+		0x23878, 0x23898,
+		0x238a0, 0x238a8,
+		0x238b0, 0x238c8,
+		0x238d0, 0x238d4,
+		0x238e0, 0x238e8,
+		0x238f0, 0x238f0,
+		0x238f8, 0x23a18,
+		0x23a20, 0x23a28,
+		0x23a30, 0x23a48,
+		0x23a50, 0x23a54,
+		0x23a60, 0x23a68,
+		0x23a70, 0x23a70,
+		0x23a78, 0x23a98,
+		0x23aa0, 0x23aa8,
+		0x23ab0, 0x23ac8,
+		0x23ad0, 0x23ad4,
+		0x23ae0, 0x23ae8,
+		0x23af0, 0x23af0,
+		0x23af8, 0x23c18,
+		0x23c20, 0x23c20,
+		0x23c28, 0x23c30,
+		0x23c38, 0x23c38,
+		0x23c80, 0x23c98,
+		0x23ca0, 0x23ca8,
+		0x23cb0, 0x23cc8,
+		0x23cd0, 0x23cd4,
+		0x23ce0, 0x23ce8,
+		0x23cf0, 0x23cf0,
+		0x23cf8, 0x23d7c,
 		0x23e00, 0x23e04,
 		0x24000, 0x2402c,
 		0x24100, 0x2413c,
-		0x24190, 0x241c8,
+		0x24190, 0x241a0,
+		0x241a8, 0x241b8,
+		0x241c4, 0x241c8,
 		0x24200, 0x24318,
-		0x24400, 0x24528,
+		0x24400, 0x244b4,
+		0x244c0, 0x24528,
 		0x24540, 0x24614,
 		0x25000, 0x25040,
 		0x2504c, 0x25060,
@@ -882,22 +1041,62 @@
 		0x25270, 0x25284,
 		0x252fc, 0x25388,
 		0x25400, 0x25404,
-		0x25500, 0x25518,
-		0x2552c, 0x2553c,
+		0x25500, 0x25500,
+		0x25510, 0x25518,
+		0x2552c, 0x25530,
+		0x2553c, 0x2553c,
 		0x25550, 0x25554,
 		0x25600, 0x25600,
-		0x25608, 0x25628,
-		0x25630, 0x2563c,
+		0x25608, 0x2561c,
+		0x25624, 0x25628,
+		0x25630, 0x25634,
+		0x2563c, 0x2563c,
 		0x25700, 0x2571c,
 		0x25780, 0x2578c,
-		0x25800, 0x25c38,
-		0x25c80, 0x25d7c,
+		0x25800, 0x25818,
+		0x25820, 0x25828,
+		0x25830, 0x25848,
+		0x25850, 0x25854,
+		0x25860, 0x25868,
+		0x25870, 0x25870,
+		0x25878, 0x25898,
+		0x258a0, 0x258a8,
+		0x258b0, 0x258c8,
+		0x258d0, 0x258d4,
+		0x258e0, 0x258e8,
+		0x258f0, 0x258f0,
+		0x258f8, 0x25a18,
+		0x25a20, 0x25a28,
+		0x25a30, 0x25a48,
+		0x25a50, 0x25a54,
+		0x25a60, 0x25a68,
+		0x25a70, 0x25a70,
+		0x25a78, 0x25a98,
+		0x25aa0, 0x25aa8,
+		0x25ab0, 0x25ac8,
+		0x25ad0, 0x25ad4,
+		0x25ae0, 0x25ae8,
+		0x25af0, 0x25af0,
+		0x25af8, 0x25c18,
+		0x25c20, 0x25c20,
+		0x25c28, 0x25c30,
+		0x25c38, 0x25c38,
+		0x25c80, 0x25c98,
+		0x25ca0, 0x25ca8,
+		0x25cb0, 0x25cc8,
+		0x25cd0, 0x25cd4,
+		0x25ce0, 0x25ce8,
+		0x25cf0, 0x25cf0,
+		0x25cf8, 0x25d7c,
 		0x25e00, 0x25e04,
 		0x26000, 0x2602c,
 		0x26100, 0x2613c,
-		0x26190, 0x261c8,
+		0x26190, 0x261a0,
+		0x261a8, 0x261b8,
+		0x261c4, 0x261c8,
 		0x26200, 0x26318,
-		0x26400, 0x26528,
+		0x26400, 0x264b4,
+		0x264c0, 0x26528,
 		0x26540, 0x26614,
 		0x27000, 0x27040,
 		0x2704c, 0x27060,
@@ -906,51 +1105,120 @@
 		0x27270, 0x27284,
 		0x272fc, 0x27388,
 		0x27400, 0x27404,
-		0x27500, 0x27518,
-		0x2752c, 0x2753c,
+		0x27500, 0x27500,
+		0x27510, 0x27518,
+		0x2752c, 0x27530,
+		0x2753c, 0x2753c,
 		0x27550, 0x27554,
 		0x27600, 0x27600,
-		0x27608, 0x27628,
-		0x27630, 0x2763c,
+		0x27608, 0x2761c,
+		0x27624, 0x27628,
+		0x27630, 0x27634,
+		0x2763c, 0x2763c,
 		0x27700, 0x2771c,
 		0x27780, 0x2778c,
-		0x27800, 0x27c38,
-		0x27c80, 0x27d7c,
+		0x27800, 0x27818,
+		0x27820, 0x27828,
+		0x27830, 0x27848,
+		0x27850, 0x27854,
+		0x27860, 0x27868,
+		0x27870, 0x27870,
+		0x27878, 0x27898,
+		0x278a0, 0x278a8,
+		0x278b0, 0x278c8,
+		0x278d0, 0x278d4,
+		0x278e0, 0x278e8,
+		0x278f0, 0x278f0,
+		0x278f8, 0x27a18,
+		0x27a20, 0x27a28,
+		0x27a30, 0x27a48,
+		0x27a50, 0x27a54,
+		0x27a60, 0x27a68,
+		0x27a70, 0x27a70,
+		0x27a78, 0x27a98,
+		0x27aa0, 0x27aa8,
+		0x27ab0, 0x27ac8,
+		0x27ad0, 0x27ad4,
+		0x27ae0, 0x27ae8,
+		0x27af0, 0x27af0,
+		0x27af8, 0x27c18,
+		0x27c20, 0x27c20,
+		0x27c28, 0x27c30,
+		0x27c38, 0x27c38,
+		0x27c80, 0x27c98,
+		0x27ca0, 0x27ca8,
+		0x27cb0, 0x27cc8,
+		0x27cd0, 0x27cd4,
+		0x27ce0, 0x27ce8,
+		0x27cf0, 0x27cf0,
+		0x27cf8, 0x27d7c,
 		0x27e00, 0x27e04,
 	};
 
 	static const unsigned int t5_reg_ranges[] = {
-		0x1008, 0x1148,
-		0x1180, 0x11b4,
+		0x1008, 0x10c0,
+		0x10cc, 0x10f8,
+		0x1100, 0x1100,
+		0x110c, 0x1148,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x123c,
 		0x1280, 0x173c,
 		0x1800, 0x18fc,
 		0x3000, 0x3028,
-		0x3068, 0x30d8,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
 		0x30e0, 0x30fc,
 		0x3140, 0x357c,
 		0x35a8, 0x35cc,
 		0x35ec, 0x35ec,
 		0x3600, 0x5624,
-		0x56cc, 0x575c,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
 		0x580c, 0x5814,
-		0x5890, 0x58bc,
-		0x5940, 0x59dc,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x59c8,
+		0x59d0, 0x59dc,
 		0x59fc, 0x5a18,
-		0x5a60, 0x5a9c,
+		0x5a60, 0x5a70,
+		0x5a80, 0x5a9c,
 		0x5b94, 0x5bfc,
-		0x6000, 0x6040,
-		0x6058, 0x614c,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x614c,
 		0x7700, 0x7798,
 		0x77c0, 0x78fc,
-		0x7b00, 0x7c54,
-		0x7d00, 0x7efc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
 		0x8dc0, 0x8de0,
-		0x8df8, 0x8e84,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
 		0x8ea0, 0x8f84,
-		0x8fc0, 0x90f8,
-		0x9400, 0x9470,
-		0x9600, 0x96f4,
+		0x8fc0, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9400, 0x9408,
+		0x9410, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96f4,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -962,103 +1230,143 @@
 		0x9e80, 0x9eec,
 		0x9f00, 0x9f6c,
 		0x9f80, 0xa020,
-		0xd004, 0xd03c,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
 		0xdfc0, 0xdfe0,
-		0xe000, 0x11088,
-		0x1109c, 0x11110,
-		0x11118, 0x1117c,
+		0xe000, 0x1106c,
+		0x11074, 0x11088,
+		0x1109c, 0x1117c,
 		0x11190, 0x11204,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
 		0x19238, 0x19290,
-		0x193f8, 0x19474,
+		0x193f8, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
-		0x19c00, 0x19c60,
-		0x19c94, 0x19e10,
-		0x19e50, 0x19f34,
+		0x19c00, 0x19c08,
+		0x19c10, 0x19c60,
+		0x19c94, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f24,
+		0x19f34, 0x19f34,
 		0x19f40, 0x19f50,
-		0x19f90, 0x19fe4,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x19f90, 0x19fb4,
+		0x19fc4, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e008, 0x1e00c,
-		0x1e040, 0x1e04c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
 		0x1e284, 0x1e290,
 		0x1e2c0, 0x1e2c0,
 		0x1e2e0, 0x1e2e0,
 		0x1e300, 0x1e384,
 		0x1e3c0, 0x1e3c8,
 		0x1e408, 0x1e40c,
-		0x1e440, 0x1e44c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
 		0x1e684, 0x1e690,
 		0x1e6c0, 0x1e6c0,
 		0x1e6e0, 0x1e6e0,
 		0x1e700, 0x1e784,
 		0x1e7c0, 0x1e7c8,
 		0x1e808, 0x1e80c,
-		0x1e840, 0x1e84c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
 		0x1ea84, 0x1ea90,
 		0x1eac0, 0x1eac0,
 		0x1eae0, 0x1eae0,
 		0x1eb00, 0x1eb84,
 		0x1ebc0, 0x1ebc8,
 		0x1ec08, 0x1ec0c,
-		0x1ec40, 0x1ec4c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
 		0x1ee84, 0x1ee90,
 		0x1eec0, 0x1eec0,
 		0x1eee0, 0x1eee0,
 		0x1ef00, 0x1ef84,
 		0x1efc0, 0x1efc8,
 		0x1f008, 0x1f00c,
-		0x1f040, 0x1f04c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
 		0x1f284, 0x1f290,
 		0x1f2c0, 0x1f2c0,
 		0x1f2e0, 0x1f2e0,
 		0x1f300, 0x1f384,
 		0x1f3c0, 0x1f3c8,
 		0x1f408, 0x1f40c,
-		0x1f440, 0x1f44c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
 		0x1f684, 0x1f690,
 		0x1f6c0, 0x1f6c0,
 		0x1f6e0, 0x1f6e0,
 		0x1f700, 0x1f784,
 		0x1f7c0, 0x1f7c8,
 		0x1f808, 0x1f80c,
-		0x1f840, 0x1f84c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
 		0x1fa84, 0x1fa90,
 		0x1fac0, 0x1fac0,
 		0x1fae0, 0x1fae0,
 		0x1fb00, 0x1fb84,
 		0x1fbc0, 0x1fbc8,
 		0x1fc08, 0x1fc0c,
-		0x1fc40, 0x1fc4c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
 		0x1fe84, 0x1fe90,
 		0x1fec0, 0x1fec0,
 		0x1fee0, 0x1fee0,
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
 		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
 		0x30100, 0x30144,
-		0x30190, 0x301d0,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
 		0x30200, 0x30318,
-		0x30400, 0x3052c,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x30834,
+		0x30800, 0x30828,
+		0x30834, 0x30834,
 		0x308c0, 0x30908,
 		0x30910, 0x309ac,
-		0x30a00, 0x30a2c,
+		0x30a00, 0x30a14,
+		0x30a1c, 0x30a2c,
 		0x30a44, 0x30a50,
-		0x30a74, 0x30c24,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
 		0x30d00, 0x30d00,
 		0x30d08, 0x30d14,
 		0x30d1c, 0x30d20,
-		0x30d3c, 0x30d50,
+		0x30d3c, 0x30d3c,
+		0x30d48, 0x30d50,
 		0x31200, 0x3120c,
 		0x31220, 0x31220,
 		0x31240, 0x31240,
@@ -1078,27 +1386,65 @@
 		0x322c8, 0x322fc,
 		0x32600, 0x32630,
 		0x32a00, 0x32abc,
-		0x32b00, 0x32b70,
-		0x33000, 0x33048,
-		0x33060, 0x3309c,
-		0x330f0, 0x33148,
-		0x33160, 0x3319c,
-		0x331f0, 0x332e4,
-		0x332f8, 0x333e4,
-		0x333f8, 0x33448,
-		0x33460, 0x3349c,
-		0x334f0, 0x33548,
-		0x33560, 0x3359c,
-		0x335f0, 0x336e4,
-		0x336f8, 0x337e4,
+		0x32b00, 0x32b10,
+		0x32b20, 0x32b30,
+		0x32b40, 0x32b50,
+		0x32b60, 0x32b70,
+		0x33000, 0x33028,
+		0x33030, 0x33048,
+		0x33060, 0x33068,
+		0x33070, 0x3309c,
+		0x330f0, 0x33128,
+		0x33130, 0x33148,
+		0x33160, 0x33168,
+		0x33170, 0x3319c,
+		0x331f0, 0x33238,
+		0x33240, 0x33240,
+		0x33248, 0x33250,
+		0x3325c, 0x33264,
+		0x33270, 0x332b8,
+		0x332c0, 0x332e4,
+		0x332f8, 0x33338,
+		0x33340, 0x33340,
+		0x33348, 0x33350,
+		0x3335c, 0x33364,
+		0x33370, 0x333b8,
+		0x333c0, 0x333e4,
+		0x333f8, 0x33428,
+		0x33430, 0x33448,
+		0x33460, 0x33468,
+		0x33470, 0x3349c,
+		0x334f0, 0x33528,
+		0x33530, 0x33548,
+		0x33560, 0x33568,
+		0x33570, 0x3359c,
+		0x335f0, 0x33638,
+		0x33640, 0x33640,
+		0x33648, 0x33650,
+		0x3365c, 0x33664,
+		0x33670, 0x336b8,
+		0x336c0, 0x336e4,
+		0x336f8, 0x33738,
+		0x33740, 0x33740,
+		0x33748, 0x33750,
+		0x3375c, 0x33764,
+		0x33770, 0x337b8,
+		0x337c0, 0x337e4,
 		0x337f8, 0x337fc,
 		0x33814, 0x33814,
 		0x3382c, 0x3382c,
 		0x33880, 0x3388c,
 		0x338e8, 0x338ec,
-		0x33900, 0x33948,
-		0x33960, 0x3399c,
-		0x339f0, 0x33ae4,
+		0x33900, 0x33928,
+		0x33930, 0x33948,
+		0x33960, 0x33968,
+		0x33970, 0x3399c,
+		0x339f0, 0x33a38,
+		0x33a40, 0x33a40,
+		0x33a48, 0x33a50,
+		0x33a5c, 0x33a64,
+		0x33a70, 0x33ab8,
+		0x33ac0, 0x33ae4,
 		0x33af8, 0x33b10,
 		0x33b28, 0x33b28,
 		0x33b3c, 0x33b50,
@@ -1107,21 +1453,32 @@
 		0x33c3c, 0x33c50,
 		0x33cf0, 0x33cfc,
 		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
 		0x34100, 0x34144,
-		0x34190, 0x341d0,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
 		0x34200, 0x34318,
-		0x34400, 0x3452c,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x34834,
+		0x34800, 0x34828,
+		0x34834, 0x34834,
 		0x348c0, 0x34908,
 		0x34910, 0x349ac,
-		0x34a00, 0x34a2c,
+		0x34a00, 0x34a14,
+		0x34a1c, 0x34a2c,
 		0x34a44, 0x34a50,
-		0x34a74, 0x34c24,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
 		0x34d00, 0x34d00,
 		0x34d08, 0x34d14,
 		0x34d1c, 0x34d20,
-		0x34d3c, 0x34d50,
+		0x34d3c, 0x34d3c,
+		0x34d48, 0x34d50,
 		0x35200, 0x3520c,
 		0x35220, 0x35220,
 		0x35240, 0x35240,
@@ -1141,27 +1498,65 @@
 		0x362c8, 0x362fc,
 		0x36600, 0x36630,
 		0x36a00, 0x36abc,
-		0x36b00, 0x36b70,
-		0x37000, 0x37048,
-		0x37060, 0x3709c,
-		0x370f0, 0x37148,
-		0x37160, 0x3719c,
-		0x371f0, 0x372e4,
-		0x372f8, 0x373e4,
-		0x373f8, 0x37448,
-		0x37460, 0x3749c,
-		0x374f0, 0x37548,
-		0x37560, 0x3759c,
-		0x375f0, 0x376e4,
-		0x376f8, 0x377e4,
+		0x36b00, 0x36b10,
+		0x36b20, 0x36b30,
+		0x36b40, 0x36b50,
+		0x36b60, 0x36b70,
+		0x37000, 0x37028,
+		0x37030, 0x37048,
+		0x37060, 0x37068,
+		0x37070, 0x3709c,
+		0x370f0, 0x37128,
+		0x37130, 0x37148,
+		0x37160, 0x37168,
+		0x37170, 0x3719c,
+		0x371f0, 0x37238,
+		0x37240, 0x37240,
+		0x37248, 0x37250,
+		0x3725c, 0x37264,
+		0x37270, 0x372b8,
+		0x372c0, 0x372e4,
+		0x372f8, 0x37338,
+		0x37340, 0x37340,
+		0x37348, 0x37350,
+		0x3735c, 0x37364,
+		0x37370, 0x373b8,
+		0x373c0, 0x373e4,
+		0x373f8, 0x37428,
+		0x37430, 0x37448,
+		0x37460, 0x37468,
+		0x37470, 0x3749c,
+		0x374f0, 0x37528,
+		0x37530, 0x37548,
+		0x37560, 0x37568,
+		0x37570, 0x3759c,
+		0x375f0, 0x37638,
+		0x37640, 0x37640,
+		0x37648, 0x37650,
+		0x3765c, 0x37664,
+		0x37670, 0x376b8,
+		0x376c0, 0x376e4,
+		0x376f8, 0x37738,
+		0x37740, 0x37740,
+		0x37748, 0x37750,
+		0x3775c, 0x37764,
+		0x37770, 0x377b8,
+		0x377c0, 0x377e4,
 		0x377f8, 0x377fc,
 		0x37814, 0x37814,
 		0x3782c, 0x3782c,
 		0x37880, 0x3788c,
 		0x378e8, 0x378ec,
-		0x37900, 0x37948,
-		0x37960, 0x3799c,
-		0x379f0, 0x37ae4,
+		0x37900, 0x37928,
+		0x37930, 0x37948,
+		0x37960, 0x37968,
+		0x37970, 0x3799c,
+		0x379f0, 0x37a38,
+		0x37a40, 0x37a40,
+		0x37a48, 0x37a50,
+		0x37a5c, 0x37a64,
+		0x37a70, 0x37ab8,
+		0x37ac0, 0x37ae4,
 		0x37af8, 0x37b10,
 		0x37b28, 0x37b28,
 		0x37b3c, 0x37b50,
@@ -1170,21 +1565,32 @@
 		0x37c3c, 0x37c50,
 		0x37cf0, 0x37cfc,
 		0x38000, 0x38030,
+		0x38038, 0x38038,
+		0x38040, 0x38040,
 		0x38100, 0x38144,
-		0x38190, 0x381d0,
+		0x38190, 0x381a0,
+		0x381a8, 0x381b8,
+		0x381c4, 0x381c8,
+		0x381d0, 0x381d0,
 		0x38200, 0x38318,
-		0x38400, 0x3852c,
+		0x38400, 0x384b4,
+		0x384c0, 0x3852c,
 		0x38540, 0x3861c,
-		0x38800, 0x38834,
+		0x38800, 0x38828,
+		0x38834, 0x38834,
 		0x388c0, 0x38908,
 		0x38910, 0x389ac,
-		0x38a00, 0x38a2c,
+		0x38a00, 0x38a14,
+		0x38a1c, 0x38a2c,
 		0x38a44, 0x38a50,
-		0x38a74, 0x38c24,
+		0x38a74, 0x38a74,
+		0x38a7c, 0x38afc,
+		0x38b08, 0x38c24,
 		0x38d00, 0x38d00,
 		0x38d08, 0x38d14,
 		0x38d1c, 0x38d20,
-		0x38d3c, 0x38d50,
+		0x38d3c, 0x38d3c,
+		0x38d48, 0x38d50,
 		0x39200, 0x3920c,
 		0x39220, 0x39220,
 		0x39240, 0x39240,
@@ -1204,27 +1610,65 @@
 		0x3a2c8, 0x3a2fc,
 		0x3a600, 0x3a630,
 		0x3aa00, 0x3aabc,
-		0x3ab00, 0x3ab70,
-		0x3b000, 0x3b048,
-		0x3b060, 0x3b09c,
-		0x3b0f0, 0x3b148,
-		0x3b160, 0x3b19c,
-		0x3b1f0, 0x3b2e4,
-		0x3b2f8, 0x3b3e4,
-		0x3b3f8, 0x3b448,
-		0x3b460, 0x3b49c,
-		0x3b4f0, 0x3b548,
-		0x3b560, 0x3b59c,
-		0x3b5f0, 0x3b6e4,
-		0x3b6f8, 0x3b7e4,
+		0x3ab00, 0x3ab10,
+		0x3ab20, 0x3ab30,
+		0x3ab40, 0x3ab50,
+		0x3ab60, 0x3ab70,
+		0x3b000, 0x3b028,
+		0x3b030, 0x3b048,
+		0x3b060, 0x3b068,
+		0x3b070, 0x3b09c,
+		0x3b0f0, 0x3b128,
+		0x3b130, 0x3b148,
+		0x3b160, 0x3b168,
+		0x3b170, 0x3b19c,
+		0x3b1f0, 0x3b238,
+		0x3b240, 0x3b240,
+		0x3b248, 0x3b250,
+		0x3b25c, 0x3b264,
+		0x3b270, 0x3b2b8,
+		0x3b2c0, 0x3b2e4,
+		0x3b2f8, 0x3b338,
+		0x3b340, 0x3b340,
+		0x3b348, 0x3b350,
+		0x3b35c, 0x3b364,
+		0x3b370, 0x3b3b8,
+		0x3b3c0, 0x3b3e4,
+		0x3b3f8, 0x3b428,
+		0x3b430, 0x3b448,
+		0x3b460, 0x3b468,
+		0x3b470, 0x3b49c,
+		0x3b4f0, 0x3b528,
+		0x3b530, 0x3b548,
+		0x3b560, 0x3b568,
+		0x3b570, 0x3b59c,
+		0x3b5f0, 0x3b638,
+		0x3b640, 0x3b640,
+		0x3b648, 0x3b650,
+		0x3b65c, 0x3b664,
+		0x3b670, 0x3b6b8,
+		0x3b6c0, 0x3b6e4,
+		0x3b6f8, 0x3b738,
+		0x3b740, 0x3b740,
+		0x3b748, 0x3b750,
+		0x3b75c, 0x3b764,
+		0x3b770, 0x3b7b8,
+		0x3b7c0, 0x3b7e4,
 		0x3b7f8, 0x3b7fc,
 		0x3b814, 0x3b814,
 		0x3b82c, 0x3b82c,
 		0x3b880, 0x3b88c,
 		0x3b8e8, 0x3b8ec,
-		0x3b900, 0x3b948,
-		0x3b960, 0x3b99c,
-		0x3b9f0, 0x3bae4,
+		0x3b900, 0x3b928,
+		0x3b930, 0x3b948,
+		0x3b960, 0x3b968,
+		0x3b970, 0x3b99c,
+		0x3b9f0, 0x3ba38,
+		0x3ba40, 0x3ba40,
+		0x3ba48, 0x3ba50,
+		0x3ba5c, 0x3ba64,
+		0x3ba70, 0x3bab8,
+		0x3bac0, 0x3bae4,
 		0x3baf8, 0x3bb10,
 		0x3bb28, 0x3bb28,
 		0x3bb3c, 0x3bb50,
@@ -1233,21 +1677,32 @@
 		0x3bc3c, 0x3bc50,
 		0x3bcf0, 0x3bcfc,
 		0x3c000, 0x3c030,
+		0x3c038, 0x3c038,
+		0x3c040, 0x3c040,
 		0x3c100, 0x3c144,
-		0x3c190, 0x3c1d0,
+		0x3c190, 0x3c1a0,
+		0x3c1a8, 0x3c1b8,
+		0x3c1c4, 0x3c1c8,
+		0x3c1d0, 0x3c1d0,
 		0x3c200, 0x3c318,
-		0x3c400, 0x3c52c,
+		0x3c400, 0x3c4b4,
+		0x3c4c0, 0x3c52c,
 		0x3c540, 0x3c61c,
-		0x3c800, 0x3c834,
+		0x3c800, 0x3c828,
+		0x3c834, 0x3c834,
 		0x3c8c0, 0x3c908,
 		0x3c910, 0x3c9ac,
-		0x3ca00, 0x3ca2c,
+		0x3ca00, 0x3ca14,
+		0x3ca1c, 0x3ca2c,
 		0x3ca44, 0x3ca50,
-		0x3ca74, 0x3cc24,
+		0x3ca74, 0x3ca74,
+		0x3ca7c, 0x3cafc,
+		0x3cb08, 0x3cc24,
 		0x3cd00, 0x3cd00,
 		0x3cd08, 0x3cd14,
 		0x3cd1c, 0x3cd20,
-		0x3cd3c, 0x3cd50,
+		0x3cd3c, 0x3cd3c,
+		0x3cd48, 0x3cd50,
 		0x3d200, 0x3d20c,
 		0x3d220, 0x3d220,
 		0x3d240, 0x3d240,
@@ -1267,27 +1722,65 @@
 		0x3e2c8, 0x3e2fc,
 		0x3e600, 0x3e630,
 		0x3ea00, 0x3eabc,
-		0x3eb00, 0x3eb70,
-		0x3f000, 0x3f048,
-		0x3f060, 0x3f09c,
-		0x3f0f0, 0x3f148,
-		0x3f160, 0x3f19c,
-		0x3f1f0, 0x3f2e4,
-		0x3f2f8, 0x3f3e4,
-		0x3f3f8, 0x3f448,
-		0x3f460, 0x3f49c,
-		0x3f4f0, 0x3f548,
-		0x3f560, 0x3f59c,
-		0x3f5f0, 0x3f6e4,
-		0x3f6f8, 0x3f7e4,
+		0x3eb00, 0x3eb10,
+		0x3eb20, 0x3eb30,
+		0x3eb40, 0x3eb50,
+		0x3eb60, 0x3eb70,
+		0x3f000, 0x3f028,
+		0x3f030, 0x3f048,
+		0x3f060, 0x3f068,
+		0x3f070, 0x3f09c,
+		0x3f0f0, 0x3f128,
+		0x3f130, 0x3f148,
+		0x3f160, 0x3f168,
+		0x3f170, 0x3f19c,
+		0x3f1f0, 0x3f238,
+		0x3f240, 0x3f240,
+		0x3f248, 0x3f250,
+		0x3f25c, 0x3f264,
+		0x3f270, 0x3f2b8,
+		0x3f2c0, 0x3f2e4,
+		0x3f2f8, 0x3f338,
+		0x3f340, 0x3f340,
+		0x3f348, 0x3f350,
+		0x3f35c, 0x3f364,
+		0x3f370, 0x3f3b8,
+		0x3f3c0, 0x3f3e4,
+		0x3f3f8, 0x3f428,
+		0x3f430, 0x3f448,
+		0x3f460, 0x3f468,
+		0x3f470, 0x3f49c,
+		0x3f4f0, 0x3f528,
+		0x3f530, 0x3f548,
+		0x3f560, 0x3f568,
+		0x3f570, 0x3f59c,
+		0x3f5f0, 0x3f638,
+		0x3f640, 0x3f640,
+		0x3f648, 0x3f650,
+		0x3f65c, 0x3f664,
+		0x3f670, 0x3f6b8,
+		0x3f6c0, 0x3f6e4,
+		0x3f6f8, 0x3f738,
+		0x3f740, 0x3f740,
+		0x3f748, 0x3f750,
+		0x3f75c, 0x3f764,
+		0x3f770, 0x3f7b8,
+		0x3f7c0, 0x3f7e4,
 		0x3f7f8, 0x3f7fc,
 		0x3f814, 0x3f814,
 		0x3f82c, 0x3f82c,
 		0x3f880, 0x3f88c,
 		0x3f8e8, 0x3f8ec,
-		0x3f900, 0x3f948,
-		0x3f960, 0x3f99c,
-		0x3f9f0, 0x3fae4,
+		0x3f900, 0x3f928,
+		0x3f930, 0x3f948,
+		0x3f960, 0x3f968,
+		0x3f970, 0x3f99c,
+		0x3f9f0, 0x3fa38,
+		0x3fa40, 0x3fa40,
+		0x3fa48, 0x3fa50,
+		0x3fa5c, 0x3fa64,
+		0x3fa70, 0x3fab8,
+		0x3fac0, 0x3fae4,
 		0x3faf8, 0x3fb10,
 		0x3fb28, 0x3fb28,
 		0x3fb3c, 0x3fb50,
@@ -1296,108 +1789,224 @@
 		0x3fc3c, 0x3fc50,
 		0x3fcf0, 0x3fcfc,
 		0x40000, 0x4000c,
-		0x40040, 0x40068,
-		0x4007c, 0x40144,
+		0x40040, 0x40050,
+		0x40060, 0x40068,
+		0x4007c, 0x4008c,
+		0x40094, 0x400b0,
+		0x400c0, 0x40144,
 		0x40180, 0x4018c,
-		0x40200, 0x40298,
-		0x402ac, 0x4033c,
+		0x40200, 0x40254,
+		0x40260, 0x40264,
+		0x40270, 0x40288,
+		0x40290, 0x40298,
+		0x402ac, 0x402c8,
+		0x402d0, 0x402e0,
+		0x402f0, 0x402f0,
+		0x40300, 0x4033c,
 		0x403f8, 0x403fc,
 		0x41304, 0x413c4,
-		0x41400, 0x4141c,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
 		0x41480, 0x414d0,
-		0x44000, 0x44078,
-		0x440c0, 0x44278,
-		0x442c0, 0x44478,
-		0x444c0, 0x44678,
-		0x446c0, 0x44878,
-		0x448c0, 0x449fc,
-		0x45000, 0x45068,
+		0x44000, 0x44054,
+		0x4405c, 0x44078,
+		0x440c0, 0x44174,
+		0x44180, 0x441ac,
+		0x441b4, 0x441b8,
+		0x441c0, 0x44254,
+		0x4425c, 0x44278,
+		0x442c0, 0x44374,
+		0x44380, 0x443ac,
+		0x443b4, 0x443b8,
+		0x443c0, 0x44454,
+		0x4445c, 0x44478,
+		0x444c0, 0x44574,
+		0x44580, 0x445ac,
+		0x445b4, 0x445b8,
+		0x445c0, 0x44654,
+		0x4465c, 0x44678,
+		0x446c0, 0x44774,
+		0x44780, 0x447ac,
+		0x447b4, 0x447b8,
+		0x447c0, 0x44854,
+		0x4485c, 0x44878,
+		0x448c0, 0x44974,
+		0x44980, 0x449ac,
+		0x449b4, 0x449b8,
+		0x449c0, 0x449fc,
+		0x45000, 0x45004,
+		0x45010, 0x45030,
+		0x45040, 0x45060,
+		0x45068, 0x45068,
 		0x45080, 0x45084,
 		0x450a0, 0x450b0,
-		0x45200, 0x45268,
+		0x45200, 0x45204,
+		0x45210, 0x45230,
+		0x45240, 0x45260,
+		0x45268, 0x45268,
 		0x45280, 0x45284,
 		0x452a0, 0x452b0,
 		0x460c0, 0x460e4,
-		0x47000, 0x4708c,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
 		0x47200, 0x47250,
-		0x47400, 0x47420,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
 		0x47600, 0x47618,
 		0x47800, 0x47814,
 		0x48000, 0x4800c,
-		0x48040, 0x48068,
-		0x4807c, 0x48144,
+		0x48040, 0x48050,
+		0x48060, 0x48068,
+		0x4807c, 0x4808c,
+		0x48094, 0x480b0,
+		0x480c0, 0x48144,
 		0x48180, 0x4818c,
-		0x48200, 0x48298,
-		0x482ac, 0x4833c,
+		0x48200, 0x48254,
+		0x48260, 0x48264,
+		0x48270, 0x48288,
+		0x48290, 0x48298,
+		0x482ac, 0x482c8,
+		0x482d0, 0x482e0,
+		0x482f0, 0x482f0,
+		0x48300, 0x4833c,
 		0x483f8, 0x483fc,
 		0x49304, 0x493c4,
-		0x49400, 0x4941c,
+		0x49400, 0x4940c,
+		0x49414, 0x4941c,
 		0x49480, 0x494d0,
-		0x4c000, 0x4c078,
-		0x4c0c0, 0x4c278,
-		0x4c2c0, 0x4c478,
-		0x4c4c0, 0x4c678,
-		0x4c6c0, 0x4c878,
-		0x4c8c0, 0x4c9fc,
-		0x4d000, 0x4d068,
+		0x4c000, 0x4c054,
+		0x4c05c, 0x4c078,
+		0x4c0c0, 0x4c174,
+		0x4c180, 0x4c1ac,
+		0x4c1b4, 0x4c1b8,
+		0x4c1c0, 0x4c254,
+		0x4c25c, 0x4c278,
+		0x4c2c0, 0x4c374,
+		0x4c380, 0x4c3ac,
+		0x4c3b4, 0x4c3b8,
+		0x4c3c0, 0x4c454,
+		0x4c45c, 0x4c478,
+		0x4c4c0, 0x4c574,
+		0x4c580, 0x4c5ac,
+		0x4c5b4, 0x4c5b8,
+		0x4c5c0, 0x4c654,
+		0x4c65c, 0x4c678,
+		0x4c6c0, 0x4c774,
+		0x4c780, 0x4c7ac,
+		0x4c7b4, 0x4c7b8,
+		0x4c7c0, 0x4c854,
+		0x4c85c, 0x4c878,
+		0x4c8c0, 0x4c974,
+		0x4c980, 0x4c9ac,
+		0x4c9b4, 0x4c9b8,
+		0x4c9c0, 0x4c9fc,
+		0x4d000, 0x4d004,
+		0x4d010, 0x4d030,
+		0x4d040, 0x4d060,
+		0x4d068, 0x4d068,
 		0x4d080, 0x4d084,
 		0x4d0a0, 0x4d0b0,
-		0x4d200, 0x4d268,
+		0x4d200, 0x4d204,
+		0x4d210, 0x4d230,
+		0x4d240, 0x4d260,
+		0x4d268, 0x4d268,
 		0x4d280, 0x4d284,
 		0x4d2a0, 0x4d2b0,
 		0x4e0c0, 0x4e0e4,
-		0x4f000, 0x4f08c,
+		0x4f000, 0x4f03c,
+		0x4f044, 0x4f08c,
 		0x4f200, 0x4f250,
-		0x4f400, 0x4f420,
+		0x4f400, 0x4f408,
+		0x4f414, 0x4f420,
 		0x4f600, 0x4f618,
 		0x4f800, 0x4f814,
-		0x50000, 0x500cc,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
 		0x50400, 0x50400,
-		0x50800, 0x508cc,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
 		0x50c00, 0x50c00,
 		0x51000, 0x5101c,
 		0x51300, 0x51308,
 	};
 
 	static const unsigned int t6_reg_ranges[] = {
-		0x1008, 0x1124,
-		0x1138, 0x114c,
-		0x1180, 0x11b4,
+		0x1008, 0x101c,
+		0x1024, 0x10a8,
+		0x10b4, 0x10f8,
+		0x1100, 0x1114,
+		0x111c, 0x112c,
+		0x1138, 0x113c,
+		0x1144, 0x114c,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
 		0x11fc, 0x1254,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,
-		0x3060, 0x30d8,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
 		0x30e0, 0x30fc,
 		0x3140, 0x357c,
 		0x35a8, 0x35cc,
 		0x35ec, 0x35ec,
 		0x3600, 0x5624,
-		0x56cc, 0x575c,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
 		0x580c, 0x5814,
-		0x5890, 0x58bc,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
 		0x5940, 0x595c,
 		0x5980, 0x598c,
-		0x59b0, 0x59dc,
+		0x59b0, 0x59c8,
+		0x59d0, 0x59dc,
 		0x59fc, 0x5a18,
 		0x5a60, 0x5a6c,
-		0x5a80, 0x5a9c,
+		0x5a80, 0x5a8c,
+		0x5a94, 0x5a9c,
 		0x5b94, 0x5bfc,
-		0x5c10, 0x5ec0,
+		0x5c10, 0x5e48,
+		0x5e50, 0x5e94,
+		0x5ea0, 0x5eb0,
+		0x5ec0, 0x5ec0,
 		0x5ec8, 0x5ecc,
-		0x6000, 0x6040,
-		0x6058, 0x619c,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x619c,
 		0x7700, 0x7798,
 		0x77c0, 0x7880,
 		0x78cc, 0x78fc,
-		0x7b00, 0x7c54,
-		0x7d00, 0x7efc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d84,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
 		0x8dc0, 0x8de4,
-		0x8df8, 0x8e84,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
 		0x8ea0, 0x8f88,
-		0x8fb8, 0x9124,
+		0x8fb8, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9100, 0x9124,
 		0x9400, 0x9470,
-		0x9600, 0x971c,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x9704,
+		0x9710, 0x971c,
 		0x9800, 0x9808,
 		0x9820, 0x983c,
 		0x9850, 0x9864,
@@ -1411,109 +2020,170 @@
 		0x9f80, 0xa020,
 		0xd004, 0xd03c,
 		0xd100, 0xd118,
-		0xd200, 0xd31c,
+		0xd200, 0xd214,
+		0xd220, 0xd234,
+		0xd240, 0xd254,
+		0xd260, 0xd274,
+		0xd280, 0xd294,
+		0xd2a0, 0xd2b4,
+		0xd2c0, 0xd2d4,
+		0xd2e0, 0xd2f4,
+		0xd300, 0xd31c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xf008,
 		0x11000, 0x11014,
-		0x11048, 0x1117c,
-		0x11190, 0x11270,
+		0x11048, 0x1106c,
+		0x11074, 0x11088,
+		0x11098, 0x11120,
+		0x1112c, 0x1117c,
+		0x11190, 0x112e0,
 		0x11300, 0x1130c,
 		0x12000, 0x1206c,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
 		0x191d0, 0x191e8,
-		0x19238, 0x192bc,
-		0x193f8, 0x19474,
+		0x19238, 0x192b0,
+		0x192bc, 0x192bc,
+		0x19348, 0x1934c,
+		0x193f8, 0x19418,
+		0x19420, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
-		0x19c00, 0x19c80,
-		0x19c94, 0x19cbc,
-		0x19ce4, 0x19d28,
+		0x19c00, 0x19c48,
+		0x19c50, 0x19c80,
+		0x19c94, 0x19c98,
+		0x19ca0, 0x19cbc,
+		0x19ce4, 0x19ce4,
+		0x19cf0, 0x19cf8,
+		0x19d00, 0x19d28,
 		0x19d50, 0x19d78,
-		0x19d94, 0x19dc8,
+		0x19d94, 0x19d98,
+		0x19da0, 0x19dc8,
 		0x19df0, 0x19e10,
 		0x19e50, 0x19e6c,
-		0x19ea0, 0x19f34,
+		0x19ea0, 0x19ebc,
+		0x19ec4, 0x19ef4,
+		0x19f04, 0x19f2c,
+		0x19f34, 0x19f34,
 		0x19f40, 0x19f50,
 		0x19f90, 0x19fac,
-		0x19fc4, 0x19fe4,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
+		0x19fc4, 0x19fc8,
+		0x19fd0, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
 		0x1a190, 0x1a1c4,
 		0x1a1fc, 0x1a1fc,
 		0x1e008, 0x1e00c,
-		0x1e040, 0x1e04c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
 		0x1e284, 0x1e290,
 		0x1e2c0, 0x1e2c0,
 		0x1e2e0, 0x1e2e0,
 		0x1e300, 0x1e384,
 		0x1e3c0, 0x1e3c8,
 		0x1e408, 0x1e40c,
-		0x1e440, 0x1e44c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
 		0x1e684, 0x1e690,
 		0x1e6c0, 0x1e6c0,
 		0x1e6e0, 0x1e6e0,
 		0x1e700, 0x1e784,
 		0x1e7c0, 0x1e7c8,
 		0x1e808, 0x1e80c,
-		0x1e840, 0x1e84c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
 		0x1ea84, 0x1ea90,
 		0x1eac0, 0x1eac0,
 		0x1eae0, 0x1eae0,
 		0x1eb00, 0x1eb84,
 		0x1ebc0, 0x1ebc8,
 		0x1ec08, 0x1ec0c,
-		0x1ec40, 0x1ec4c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
 		0x1ee84, 0x1ee90,
 		0x1eec0, 0x1eec0,
 		0x1eee0, 0x1eee0,
 		0x1ef00, 0x1ef84,
 		0x1efc0, 0x1efc8,
 		0x1f008, 0x1f00c,
-		0x1f040, 0x1f04c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
 		0x1f284, 0x1f290,
 		0x1f2c0, 0x1f2c0,
 		0x1f2e0, 0x1f2e0,
 		0x1f300, 0x1f384,
 		0x1f3c0, 0x1f3c8,
 		0x1f408, 0x1f40c,
-		0x1f440, 0x1f44c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
 		0x1f684, 0x1f690,
 		0x1f6c0, 0x1f6c0,
 		0x1f6e0, 0x1f6e0,
 		0x1f700, 0x1f784,
 		0x1f7c0, 0x1f7c8,
 		0x1f808, 0x1f80c,
-		0x1f840, 0x1f84c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
 		0x1fa84, 0x1fa90,
 		0x1fac0, 0x1fac0,
 		0x1fae0, 0x1fae0,
 		0x1fb00, 0x1fb84,
 		0x1fbc0, 0x1fbc8,
 		0x1fc08, 0x1fc0c,
-		0x1fc40, 0x1fc4c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
 		0x1fe84, 0x1fe90,
 		0x1fec0, 0x1fec0,
 		0x1fee0, 0x1fee0,
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
-		0x30000, 0x30070,
-		0x30100, 0x301d0,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30048, 0x30048,
+		0x30050, 0x30050,
+		0x3005c, 0x30060,
+		0x30068, 0x30068,
+		0x30070, 0x30070,
+		0x30100, 0x30168,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
 		0x30200, 0x30320,
-		0x30400, 0x3052c,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x30890,
+		0x30800, 0x308a0,
 		0x308c0, 0x30908,
 		0x30910, 0x309b8,
 		0x30a00, 0x30a04,
-		0x30a0c, 0x30a2c,
+		0x30a0c, 0x30a14,
+		0x30a1c, 0x30a2c,
 		0x30a44, 0x30a50,
-		0x30a74, 0x30c24,
-		0x30d00, 0x30d3c,
-		0x30d44, 0x30d7c,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d14,
+		0x30d1c, 0x30d3c,
+		0x30d44, 0x30d4c,
+		0x30d54, 0x30d74,
+		0x30d7c, 0x30d7c,
 		0x30de0, 0x30de0,
 		0x30e00, 0x30ed4,
 		0x30f00, 0x30fa4,
@@ -1542,7 +2212,8 @@
 		0x31bb0, 0x31bb4,
 		0x31bc8, 0x31bd4,
 		0x32140, 0x3218c,
-		0x321f0, 0x32200,
+		0x321f0, 0x321f4,
+		0x32200, 0x32200,
 		0x32218, 0x32218,
 		0x32400, 0x32400,
 		0x32408, 0x3241c,
@@ -1551,46 +2222,108 @@
 		0x326a8, 0x326a8,
 		0x326ec, 0x326ec,
 		0x32a00, 0x32abc,
-		0x32b00, 0x32b78,
+		0x32b00, 0x32b38,
+		0x32b40, 0x32b58,
+		0x32b60, 0x32b78,
 		0x32c00, 0x32c00,
 		0x32c08, 0x32c3c,
 		0x32e00, 0x32e2c,
 		0x32f00, 0x32f2c,
-		0x33000, 0x330ac,
-		0x330c0, 0x331ac,
-		0x331c0, 0x332c4,
-		0x332e4, 0x333c4,
-		0x333e4, 0x334ac,
-		0x334c0, 0x335ac,
-		0x335c0, 0x336c4,
-		0x336e4, 0x337c4,
+		0x33000, 0x3302c,
+		0x33034, 0x33050,
+		0x33058, 0x33058,
+		0x33060, 0x3308c,
+		0x3309c, 0x330ac,
+		0x330c0, 0x330c0,
+		0x330c8, 0x330d0,
+		0x330d8, 0x330e0,
+		0x330ec, 0x3312c,
+		0x33134, 0x33150,
+		0x33158, 0x33158,
+		0x33160, 0x3318c,
+		0x3319c, 0x331ac,
+		0x331c0, 0x331c0,
+		0x331c8, 0x331d0,
+		0x331d8, 0x331e0,
+		0x331ec, 0x33290,
+		0x33298, 0x332c4,
+		0x332e4, 0x33390,
+		0x33398, 0x333c4,
+		0x333e4, 0x3342c,
+		0x33434, 0x33450,
+		0x33458, 0x33458,
+		0x33460, 0x3348c,
+		0x3349c, 0x334ac,
+		0x334c0, 0x334c0,
+		0x334c8, 0x334d0,
+		0x334d8, 0x334e0,
+		0x334ec, 0x3352c,
+		0x33534, 0x33550,
+		0x33558, 0x33558,
+		0x33560, 0x3358c,
+		0x3359c, 0x335ac,
+		0x335c0, 0x335c0,
+		0x335c8, 0x335d0,
+		0x335d8, 0x335e0,
+		0x335ec, 0x33690,
+		0x33698, 0x336c4,
+		0x336e4, 0x33790,
+		0x33798, 0x337c4,
 		0x337e4, 0x337fc,
 		0x33814, 0x33814,
 		0x33854, 0x33868,
 		0x33880, 0x3388c,
 		0x338c0, 0x338d0,
 		0x338e8, 0x338ec,
-		0x33900, 0x339ac,
-		0x339c0, 0x33ac4,
+		0x33900, 0x3392c,
+		0x33934, 0x33950,
+		0x33958, 0x33958,
+		0x33960, 0x3398c,
+		0x3399c, 0x339ac,
+		0x339c0, 0x339c0,
+		0x339c8, 0x339d0,
+		0x339d8, 0x339e0,
+		0x339ec, 0x33a90,
+		0x33a98, 0x33ac4,
 		0x33ae4, 0x33b10,
-		0x33b24, 0x33b50,
+		0x33b24, 0x33b28,
+		0x33b38, 0x33b50,
 		0x33bf0, 0x33c10,
-		0x33c24, 0x33c50,
+		0x33c24, 0x33c28,
+		0x33c38, 0x33c50,
 		0x33cf0, 0x33cfc,
-		0x34000, 0x34070,
-		0x34100, 0x341d0,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34048, 0x34048,
+		0x34050, 0x34050,
+		0x3405c, 0x34060,
+		0x34068, 0x34068,
+		0x34070, 0x34070,
+		0x34100, 0x34168,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
 		0x34200, 0x34320,
-		0x34400, 0x3452c,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x34890,
+		0x34800, 0x348a0,
 		0x348c0, 0x34908,
 		0x34910, 0x349b8,
 		0x34a00, 0x34a04,
-		0x34a0c, 0x34a2c,
+		0x34a0c, 0x34a14,
+		0x34a1c, 0x34a2c,
 		0x34a44, 0x34a50,
-		0x34a74, 0x34c24,
-		0x34d00, 0x34d3c,
-		0x34d44, 0x34d7c,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d14,
+		0x34d1c, 0x34d3c,
+		0x34d44, 0x34d4c,
+		0x34d54, 0x34d74,
+		0x34d7c, 0x34d7c,
 		0x34de0, 0x34de0,
 		0x34e00, 0x34ed4,
 		0x34f00, 0x34fa4,
@@ -1619,7 +2352,8 @@
 		0x35bb0, 0x35bb4,
 		0x35bc8, 0x35bd4,
 		0x36140, 0x3618c,
-		0x361f0, 0x36200,
+		0x361f0, 0x361f4,
+		0x36200, 0x36200,
 		0x36218, 0x36218,
 		0x36400, 0x36400,
 		0x36408, 0x3641c,
@@ -1628,31 +2362,75 @@
 		0x366a8, 0x366a8,
 		0x366ec, 0x366ec,
 		0x36a00, 0x36abc,
-		0x36b00, 0x36b78,
+		0x36b00, 0x36b38,
+		0x36b40, 0x36b58,
+		0x36b60, 0x36b78,
 		0x36c00, 0x36c00,
 		0x36c08, 0x36c3c,
 		0x36e00, 0x36e2c,
 		0x36f00, 0x36f2c,
-		0x37000, 0x370ac,
-		0x370c0, 0x371ac,
-		0x371c0, 0x372c4,
-		0x372e4, 0x373c4,
-		0x373e4, 0x374ac,
-		0x374c0, 0x375ac,
-		0x375c0, 0x376c4,
-		0x376e4, 0x377c4,
+		0x37000, 0x3702c,
+		0x37034, 0x37050,
+		0x37058, 0x37058,
+		0x37060, 0x3708c,
+		0x3709c, 0x370ac,
+		0x370c0, 0x370c0,
+		0x370c8, 0x370d0,
+		0x370d8, 0x370e0,
+		0x370ec, 0x3712c,
+		0x37134, 0x37150,
+		0x37158, 0x37158,
+		0x37160, 0x3718c,
+		0x3719c, 0x371ac,
+		0x371c0, 0x371c0,
+		0x371c8, 0x371d0,
+		0x371d8, 0x371e0,
+		0x371ec, 0x37290,
+		0x37298, 0x372c4,
+		0x372e4, 0x37390,
+		0x37398, 0x373c4,
+		0x373e4, 0x3742c,
+		0x37434, 0x37450,
+		0x37458, 0x37458,
+		0x37460, 0x3748c,
+		0x3749c, 0x374ac,
+		0x374c0, 0x374c0,
+		0x374c8, 0x374d0,
+		0x374d8, 0x374e0,
+		0x374ec, 0x3752c,
+		0x37534, 0x37550,
+		0x37558, 0x37558,
+		0x37560, 0x3758c,
+		0x3759c, 0x375ac,
+		0x375c0, 0x375c0,
+		0x375c8, 0x375d0,
+		0x375d8, 0x375e0,
+		0x375ec, 0x37690,
+		0x37698, 0x376c4,
+		0x376e4, 0x37790,
+		0x37798, 0x377c4,
 		0x377e4, 0x377fc,
 		0x37814, 0x37814,
 		0x37854, 0x37868,
 		0x37880, 0x3788c,
 		0x378c0, 0x378d0,
 		0x378e8, 0x378ec,
-		0x37900, 0x379ac,
-		0x379c0, 0x37ac4,
+		0x37900, 0x3792c,
+		0x37934, 0x37950,
+		0x37958, 0x37958,
+		0x37960, 0x3798c,
+		0x3799c, 0x379ac,
+		0x379c0, 0x379c0,
+		0x379c8, 0x379d0,
+		0x379d8, 0x379e0,
+		0x379ec, 0x37a90,
+		0x37a98, 0x37ac4,
 		0x37ae4, 0x37b10,
-		0x37b24, 0x37b50,
+		0x37b24, 0x37b28,
+		0x37b38, 0x37b50,
 		0x37bf0, 0x37c10,
-		0x37c24, 0x37c50,
+		0x37c24, 0x37c28,
+		0x37c38, 0x37c50,
 		0x37cf0, 0x37cfc,
 		0x40040, 0x40040,
 		0x40080, 0x40084,
@@ -1664,36 +2442,62 @@
 		0x40280, 0x40280,
 		0x40304, 0x40304,
 		0x40330, 0x4033c,
-		0x41304, 0x413dc,
-		0x41400, 0x4141c,
+		0x41304, 0x413c8,
+		0x413d0, 0x413dc,
+		0x413f0, 0x413f0,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
 		0x41480, 0x414d0,
 		0x44000, 0x4407c,
-		0x440c0, 0x4427c,
-		0x442c0, 0x4447c,
-		0x444c0, 0x4467c,
-		0x446c0, 0x4487c,
-		0x448c0, 0x44a7c,
-		0x44ac0, 0x44c7c,
-		0x44cc0, 0x44e7c,
-		0x44ec0, 0x4507c,
-		0x450c0, 0x451fc,
-		0x45800, 0x45868,
+		0x440c0, 0x441ac,
+		0x441b4, 0x4427c,
+		0x442c0, 0x443ac,
+		0x443b4, 0x4447c,
+		0x444c0, 0x445ac,
+		0x445b4, 0x4467c,
+		0x446c0, 0x447ac,
+		0x447b4, 0x4487c,
+		0x448c0, 0x449ac,
+		0x449b4, 0x44a7c,
+		0x44ac0, 0x44bac,
+		0x44bb4, 0x44c7c,
+		0x44cc0, 0x44dac,
+		0x44db4, 0x44e7c,
+		0x44ec0, 0x44fac,
+		0x44fb4, 0x4507c,
+		0x450c0, 0x451ac,
+		0x451b4, 0x451fc,
+		0x45800, 0x45804,
+		0x45810, 0x45830,
+		0x45840, 0x45860,
+		0x45868, 0x45868,
 		0x45880, 0x45884,
 		0x458a0, 0x458b0,
-		0x45a00, 0x45a68,
+		0x45a00, 0x45a04,
+		0x45a10, 0x45a30,
+		0x45a40, 0x45a60,
+		0x45a68, 0x45a68,
 		0x45a80, 0x45a84,
 		0x45aa0, 0x45ab0,
 		0x460c0, 0x460e4,
-		0x47000, 0x4708c,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
 		0x47200, 0x47250,
-		0x47400, 0x47420,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
 		0x47600, 0x47618,
-		0x47800, 0x4782c,
-		0x50000, 0x500cc,
+		0x47800, 0x47814,
+		0x47820, 0x4782c,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50300, 0x50384,
 		0x50400, 0x50400,
-		0x50800, 0x508cc,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50b00, 0x50b84,
 		0x50c00, 0x50c00,
-		0x51000, 0x510b0,
+		0x51000, 0x51020,
+		0x51028, 0x510b0,
 		0x51300, 0x51324,
 	};
 
@@ -2177,11 +2981,15 @@
  */
 int t4_check_fw_version(struct adapter *adap)
 {
-	int ret, major, minor, micro;
+	int i, ret, major, minor, micro;
 	int exp_major, exp_minor, exp_micro;
 	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
 
 	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	/* Try multiple times before returning error */
+	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
+		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+
 	if (ret)
 		return ret;
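
The hunk above makes t4_check_fw_version() retry the firmware version
query up to three extra times when it fails with -EBUSY or -EAGAIN,
treating those as transient mailbox contention. A minimal, compilable
sketch of the same bounded-retry idiom; fetch_version() is a
hypothetical stand-in for t4_get_fw_version(), not driver code:

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-in for t4_get_fw_version(): fails with -EBUSY
     * twice to simulate transient mailbox contention, then succeeds. */
    static int fetch_version(unsigned int *vers)
    {
            static int calls;

            if (calls++ < 2)
                    return -EBUSY;
            *vers = 0x10e04;
            return 0;
    }

    int main(void)
    {
            unsigned int vers;
            int i, ret;

            ret = fetch_version(&vers);
            /* Retry only transient errors, and only a bounded number of
             * times, so a persistently busy device still fails promptly. */
            for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
                    ret = fetch_version(&vers);

            if (ret)
                    fprintf(stderr, "version query failed: %d\n", ret);
            else
                    printf("firmware version: %#x\n", vers);
            return ret ? 1 : 0;
    }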
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b2b5e5b..0cfa5d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -56,7 +56,7 @@
  * Generic information about the driver.
  */
 #define DRV_VERSION "2.0.0-ko"
-#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
+#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
 
 /*
  * Module Parameters.
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 8b53f7d..6401ba99 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -143,6 +143,7 @@
 	struct vnic_dev *vdev;
 	struct timer_list notify_timer;
 	struct work_struct reset;
+	struct work_struct tx_hang_reset;
 	struct work_struct change_mtu_work;
 	struct msix_entry msix_entry[ENIC_INTR_MAX];
 	struct enic_msix_entry msix[ENIC_INTR_MAX];
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 3352d02..0c22fd0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -178,13 +178,15 @@
 	return 0;
 }
 
-static void enic_log_q_error(struct enic *enic)
+static bool enic_log_q_error(struct enic *enic)
 {
 	unsigned int i;
 	u32 error_status;
+	bool err = false;
 
 	for (i = 0; i < enic->wq_count; i++) {
 		error_status = vnic_wq_error_status(&enic->wq[i]);
+		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
 				i, error_status);
@@ -192,10 +194,13 @@
 
 	for (i = 0; i < enic->rq_count; i++) {
 		error_status = vnic_rq_error_status(&enic->rq[i]);
+		err |= error_status;
 		if (error_status)
 			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
 				i, error_status);
 	}
+
+	return err;
 }
 
 static void enic_msglvl_check(struct enic *enic)
@@ -333,10 +338,9 @@
 
 	vnic_intr_return_all_credits(&enic->intr[intr]);
 
-	enic_log_q_error(enic);
-
-	/* schedule recovery from WQ/RQ error */
-	schedule_work(&enic->reset);
+	if (enic_log_q_error(enic))
+		/* schedule recovery from WQ/RQ error */
+		schedule_work(&enic->reset);
 
 	return IRQ_HANDLED;
 }
@@ -804,7 +808,7 @@
 static void enic_tx_timeout(struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	schedule_work(&enic->reset);
+	schedule_work(&enic->tx_hang_reset);
 }
 
 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
@@ -1924,6 +1928,19 @@
 	return err;
 }
 
+static int enic_dev_soft_reset(struct enic *enic)
+{
+	int err;
+
+	err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
+			    vnic_dev_soft_reset_done, 0);
+	if (err)
+		netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n",
+			   err);
+
+	return err;
+}
+
 static int enic_dev_hang_reset(struct enic *enic)
 {
 	int err;
@@ -2060,6 +2077,26 @@
 	rtnl_lock();
 
 	spin_lock(&enic->enic_api_lock);
+	enic_stop(enic->netdev);
+	enic_dev_soft_reset(enic);
+	enic_reset_addr_lists(enic);
+	enic_init_vnic_resources(enic);
+	enic_set_rss_nic_cfg(enic);
+	enic_dev_set_ig_vlan_rewrite_mode(enic);
+	enic_open(enic->netdev);
+	spin_unlock(&enic->enic_api_lock);
+	call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
+
+	rtnl_unlock();
+}
+
+static void enic_tx_hang_reset(struct work_struct *work)
+{
+	struct enic *enic = container_of(work, struct enic, tx_hang_reset);
+
+	rtnl_lock();
+
+	spin_lock(&enic->enic_api_lock);
 	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
 	enic_dev_hang_reset(enic);
@@ -2583,6 +2620,7 @@
 
 	enic_set_rx_coal_setting(enic);
 	INIT_WORK(&enic->reset, enic_reset);
+	INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
 	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
 
 	for (i = 0; i < enic->wq_count; i++)
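
The enic changes above give TX-timeout recovery its own work item
(tx_hang_reset) instead of reusing the generic reset work, so the two
error paths can run different recovery sequences. A compilable
userspace model of the underlying work-item/container_of idiom; the
struct and handler names are illustrative, not the driver's:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace model of the kernel idiom: each embedded work item gets
     * its own handler, and container_of() recovers the enclosing device
     * structure from a pointer to the member. */
    struct work {
            void (*fn)(struct work *w);
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct nic {
            const char *name;
            struct work reset;         /* WQ/RQ error recovery */
            struct work tx_hang_reset; /* TX timeout recovery */
    };

    static void do_reset(struct work *w)
    {
            struct nic *n = container_of(w, struct nic, reset);
            printf("%s: soft reset after queue error\n", n->name);
    }

    static void do_tx_hang_reset(struct work *w)
    {
            struct nic *n = container_of(w, struct nic, tx_hang_reset);
            printf("%s: hang reset after TX timeout\n", n->name);
    }

    int main(void)
    {
            struct nic n = {
                    .name = "nic0",
                    .reset = { do_reset },
                    .tx_hang_reset = { do_tx_hang_reset },
            };

            /* The two error paths now invoke different handlers. */
            n.reset.fn(&n.reset);
            n.tx_hang_reset.fn(&n.tx_hang_reset);
            return 0;
    }
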
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index a3badef..1ffd105 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -659,14 +659,14 @@
 	return 0;
 }
 
-static int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
 {
 	u64 a0 = (u32)arg, a1 = 0;
 	int wait = 1000;
 	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
 }
 
-static int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
 {
 	u64 a0 = 0, a1 = 0;
 	int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index b013b6a..54156c4 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -155,7 +155,9 @@
 void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev);
 int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev);
 int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
+int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
 int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
+int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
 void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
 	enum vnic_dev_intr_mode intr_mode);
 enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index a02ecc4..cadcee6 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1597,7 +1597,6 @@
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
-	info->eedump_len = DE_EEPROM_SIZE;
 }
 
 static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8215409..d463563 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -592,6 +592,7 @@
 	int be_get_temp_freq;
 	struct be_hwmon hwmon_info;
 	u8 pf_number;
+	u8 pci_func_num;
 	struct rss_info rss_info;
 	/* Filters for packets that need to be sent to BMC */
 	u32 bmc_filt_mask;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index eb32391..1795c93 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -851,8 +851,10 @@
 		return status;
 
 	dest_wrb = be_cmd_copy(adapter, wrb);
-	if (!dest_wrb)
-		return -EBUSY;
+	if (!dest_wrb) {
+		status = -EBUSY;
+		goto unlock;
+	}
 
 	if (use_mcc(adapter))
 		status = be_mcc_notify_wait(adapter);
@@ -862,6 +864,7 @@
 	if (!status)
 		memcpy(wrb, dest_wrb, sizeof(*wrb));
 
+unlock:
 	be_cmd_unlock(adapter);
 	return status;
 }
@@ -1984,6 +1987,8 @@
 			 be_if_cap_flags(adapter));
 	}
 	flags &= be_if_cap_flags(adapter);
+	if (!flags)
+		return -ENOTSUPP;
 
 	return __be_cmd_rx_filter(adapter, flags, value);
 }
@@ -2887,6 +2892,7 @@
 	if (!status) {
 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+		adapter->pci_func_num = attribs->pci_func_num;
 		serial_num = attribs->hba_attribs.controller_serial_number;
 		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
 			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
@@ -3709,7 +3715,6 @@
 			status = -EINVAL;
 			goto err;
 		}
-
 		adapter->pf_number = desc->pf_num;
 		be_copy_nic_desc(res, desc);
 	}
@@ -3721,7 +3726,10 @@
 	return status;
 }
 
-/* Will use MBOX only if MCCQ has not been created */
+/* Will use MBOX only if MCCQ has not been created.
+ * Non-zero domain => a PF is querying this on behalf of a VF.
+ * Zero domain => a PF or a VF is querying this for itself.
+ */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
 			      struct be_resources *res, u8 query, u8 domain)
 {
@@ -3748,10 +3756,15 @@
 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
 			       cmd.size, &wrb, &cmd);
 
-	req->hdr.domain = domain;
 	if (!lancer_chip(adapter))
 		req->hdr.version = 1;
 	req->type = ACTIVE_PROFILE_TYPE;
+	/* When a function is querying profile information relating to
+	 * itself, hdr.pf_num must be set to its pci_func_num + 1
+	 */
+	req->hdr.domain = domain;
+	if (domain == 0)
+		req->hdr.pf_num = adapter->pci_func_num + 1;
 
 	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
 	 * descriptors with all bits set to "1" for the fields which can be
@@ -3921,12 +3934,16 @@
 			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
 					     BE_IF_FLAGS_DEFQ_RSS);
 		}
-
-		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
 	} else {
 		num_vf_qs = 1;
 	}
 
+	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+	}
+
+	nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
 	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
 	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
 	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
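
One fix in the be_cmds.c hunks above deserves a closer look: the early
"return -EBUSY" in the copied-WRB path used to leak the command lock,
and is now funneled through an "unlock:" label instead. A compilable
sketch of that goto-cleanup idiom, using a pthread mutex in place of
be_cmd_lock()/be_cmd_unlock(); the function is illustrative, not
driver code, and builds with -lpthread:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Every exit taken after the lock is acquired funnels through the
     * unlock label, so an early failure can no longer leak the mutex. */
    static int issue_cmd(int have_wrb)
    {
            int status = 0;

            pthread_mutex_lock(&cmd_lock);

            if (!have_wrb) {
                    status = -EBUSY; /* was: return -EBUSY (lock leak) */
                    goto unlock;
            }

            /* ... submit the command and wait for completion ... */

    unlock:
            pthread_mutex_unlock(&cmd_lock);
            return status;
    }

    int main(void)
    {
            printf("busy path: %d\n", issue_cmd(0));
            printf("ok path:   %d\n", issue_cmd(1));
            return 0;
    }
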
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 7d178bd..91155ea 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -289,7 +289,9 @@
 	u32 timeout;		/* dword 1 */
 	u32 request_length;	/* dword 2 */
 	u8 version;		/* dword 3 */
-	u8 rsvd[3];		/* dword 3 */
+	u8 rsvd1;		/* dword 3 */
+	u8 pf_num;		/* dword 3 */
+	u8 rsvd2;		/* dword 3 */
 };
 
 #define RESP_HDR_INFO_OPCODE_SHIFT	0	/* bits 0 - 7 */
@@ -1652,7 +1654,11 @@
 
 struct mgmt_controller_attrib {
 	struct mgmt_hba_attribs hba_attribs;
-	u32 rsvd0[10];
+	u32 rsvd0[2];
+	u16 rsvd1;
+	u8 pci_func_num;
+	u8 rsvd2;
+	u32 rsvd3[7];
 } __packed;
 
 struct be_cmd_req_cntl_attribs {
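
The be_cmds.h hunks above carve named fields (pf_num, pci_func_num)
out of bytes that were previously reserved. Because these structures
describe a firmware wire format, the carve-out must leave every offset
and the total size unchanged. A compilable sketch of how that
invariant can be pinned down with C11 static assertions; the layouts
below are illustrative before/after shapes, not the driver's structs:

    #include <stddef.h>
    #include <stdint.h>

    /* Carving named fields out of reserved bytes must keep every
     * offset and the total size identical on the wire. */
    struct hdr_old {
            uint8_t version;        /* dword 3 */
            uint8_t rsvd[3];        /* dword 3 */
    };

    struct hdr_new {
            uint8_t version;        /* dword 3 */
            uint8_t rsvd1;          /* dword 3 */
            uint8_t pf_num;         /* dword 3 */
            uint8_t rsvd2;          /* dword 3 */
    };

    _Static_assert(sizeof(struct hdr_old) == sizeof(struct hdr_new),
                   "carving fields out of rsvd[] must not change the size");
    _Static_assert(offsetof(struct hdr_new, pf_num) == 2,
                   "pf_num must land on the second reserved byte");

    int main(void) { return 0; }
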
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2c9ed17..f4cb8e4 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -234,9 +234,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7bf51a1..eb48a97 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1123,11 +1123,12 @@
 					   struct sk_buff *skb,
 					   struct be_wrb_params *wrb_params)
 {
-	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
-	 * less may cause a transmit stall on that port. So the work-around is
-	 * to pad short packets (<= 32 bytes) to a 36-byte length.
+	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
+	 * packets that are 32 bytes or less may cause a transmit stall
+	 * on that port. The workaround is to pad such packets
+	 * (len <= 32 bytes) to a minimum length of 36 bytes.
 	 */
-	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+	if (skb->len <= 32) {
 		if (skb_put_padto(skb, 36))
 			return NULL;
 	}
@@ -4205,10 +4206,6 @@
 	int status, level;
 	u16 profile_id;
 
-	status = be_cmd_get_cntl_attributes(adapter);
-	if (status)
-		return status;
-
 	status = be_cmd_query_fw_cfg(adapter);
 	if (status)
 		return status;
@@ -4407,6 +4404,11 @@
 	if (!lancer_chip(adapter))
 		be_cmd_req_native_mode(adapter);
 
+	/* Need to invoke this cmd first to get the PCI Function Number */
+	status = be_cmd_get_cntl_attributes(adapter);
+	if (status)
+		return status;
+
 	if (!BE2_chip(adapter) && be_physfn(adapter))
 		be_alloc_sriov_res(adapter);
 
@@ -4999,7 +5001,15 @@
 		return false;
 	}
 
-	return (fhdr->asic_type_rev >= adapter->asic_rev);
+	/* In BE3 FW images the "asic_type_rev" field doesn't track the
+	 * asic_rev of the chips it is compatible with.
+	 * When asic_type_rev is 0 the image is compatible only with
+	 * pre-BE3-R chips (asic_rev < 0x10).
+	 */
+	if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
+		return adapter->asic_rev < 0x10;
+	else
+		return (fhdr->asic_type_rev >= adapter->asic_rev);
 }
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
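
The be_main.c hunk above special-cases BE3 firmware images whose
asic_type_rev field is 0, which marks compatibility only with
pre-BE3-R silicon (asic_rev < 0x10). A compilable pure-function model
of that acceptance test; the names and the 0x10 boundary come from the
hunk, everything else is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* asic_type_rev == 0 on a BEx chip means "pre-BE3-R only";
     * otherwise the image must be at least as new as the silicon. */
    static bool fw_compatible(bool bex_chip, unsigned int img_type_rev,
                              unsigned int chip_asic_rev)
    {
            if (bex_chip && img_type_rev == 0)
                    return chip_asic_rev < 0x10;
            return img_type_rev >= chip_asic_rev;
    }

    int main(void)
    {
            /* BE3-R silicon (asic_rev 0x10) must reject a legacy image. */
            printf("legacy image on BE3-R: %d\n",
                   fw_compatible(true, 0, 0x10));
            /* Pre-BE3-R silicon still accepts it. */
            printf("legacy image on BE3:   %d\n",
                   fw_compatible(true, 0, 0x0a));
            return 0;
    }
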
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 1543cf0..f9e7446 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -112,9 +112,8 @@
 	unsigned long flags;
 	u32 val, tempval;
 	int inc;
-	struct timespec ts;
+	struct timespec64 ts;
 	u64 ns;
-	u32 remainder;
 	val = 0;
 
 	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
@@ -163,8 +162,7 @@
 		tempval = readl(fep->hwp + FEC_ATIME);
 		/* Convert the ptp local counter to 1588 timestamp */
 		ns = timecounter_cyc2time(&fep->tc, tempval);
-		ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-		ts.tv_nsec = remainder;
+		ts = ns_to_timespec64(ns);
 
 		/* The tempval is less than 3 seconds, and so val is less than
 		 * 4 seconds. No overflow for 32-bit calculation.
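
The fec_ptp.c hunk above replaces a hand-rolled div_u64_rem() split
into a 32-bit remainder with the kernel's ns_to_timespec64(), keeping
the seconds part in a 64-bit field. A compilable userspace equivalent
of that conversion; it assumes a 64-bit time_t, and NSEC_PER_SEC is
defined locally rather than taken from kernel headers:

    #include <inttypes.h>
    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Userspace equivalent of ns_to_timespec64(): split a 64-bit
     * nanosecond count into seconds and a sub-second remainder. */
    static struct timespec ns_to_ts(uint64_t ns)
    {
            struct timespec ts;

            ts.tv_sec = ns / NSEC_PER_SEC;
            ts.tv_nsec = ns % NSEC_PER_SEC;
            return ts;
    }

    int main(void)
    {
            /* A timestamp past 2038 still round-trips with 64-bit seconds. */
            uint64_t ns = 2200000000ULL * NSEC_PER_SEC + 123456789ULL;
            struct timespec ts = ns_to_ts(ns);

            printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }
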
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 3c40f6b..55c3623 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -198,11 +198,13 @@
 
 #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 /*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MDIO registers (struct gfar)
  * This is mildly evil, but so is our hardware for doing this.
  * Also, we have to cast back to struct gfar because of
  * definition weirdness done in gianfar.h.
  */
-static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
+static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p)
 {
 	struct gfar __iomem *enet_regs = p;
 
@@ -210,6 +212,15 @@
 }
 
 /*
+ * Return the TBIPA address, starting from the address
+ * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar)
+ */
+static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p)
+{
+	return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs));
+}
+
+/*
  * Return the TBIPAR address for an eTSEC2 node
  */
 static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
@@ -220,11 +231,12 @@
 
 #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 /*
- * Return the TBIPAR address for a QE MDIO node
+ * Return the TBIPAR address for a QE MDIO node, starting from the address
+ * of the mapped MII registers (struct fsl_pq_mii)
  */
 static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
 {
-	struct fsl_pq_mdio __iomem *mdio = p;
+	struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii);
 
 	return &mdio->utbipar;
 }
@@ -300,14 +312,14 @@
 		.compatible = "fsl,gianfar-tbi",
 		.data = &(struct fsl_pq_mdio_data) {
 			.mii_offset = 0,
-			.get_tbipa = get_gfar_tbipa,
+			.get_tbipa = get_gfar_tbipa_from_mii,
 		},
 	},
 	{
 		.compatible = "fsl,gianfar-mdio",
 		.data = &(struct fsl_pq_mdio_data) {
 			.mii_offset = 0,
-			.get_tbipa = get_gfar_tbipa,
+			.get_tbipa = get_gfar_tbipa_from_mii,
 		},
 	},
 	{
@@ -315,7 +327,7 @@
 		.compatible = "gianfar",
 		.data = &(struct fsl_pq_mdio_data) {
 			.mii_offset = offsetof(struct fsl_pq_mdio, mii),
-			.get_tbipa = get_gfar_tbipa,
+			.get_tbipa = get_gfar_tbipa_from_mdio,
 		},
 	},
 	{
@@ -445,6 +457,16 @@
 
 			tbipa = data->get_tbipa(priv->map);
 
+			/*
+			 * Consistency check to make sure the TBI address is
+			 * contained within the mapped range (not because we
+			 * would fault otherwise, but to catch bugs in
+			 * computing the TBI address). Print an error message
+			 * but continue anyway.
+			 */
+			if ((void *)tbipa > priv->map + resource_size(&res) - 4)
+				dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+					((void *)tbipa - priv->map) + 4);
+
 			iowrite32be(be32_to_cpup(prop), tbipa);
 		}
 	}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 710715f..47f0400 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -107,7 +107,7 @@
 
 #include "gianfar.h"
 
-#define TX_TIMEOUT      (1*HZ)
+#define TX_TIMEOUT      (5*HZ)
 
 const char gfar_driver_version[] = "2.0";
 
@@ -907,6 +907,9 @@
 	if (of_find_property(np, "fsl,magic-packet", NULL))
 		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
 
+	if (of_get_property(np, "fsl,wake-on-filer", NULL))
+		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
+
 	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
 
 	/* In the case of a fixed PHY, the DT node associated
@@ -1415,8 +1418,14 @@
 		goto register_fail;
 	}
 
-	device_set_wakeup_capable(&dev->dev, priv->device_flags &
-				  FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
+		priv->wol_supported |= GFAR_WOL_MAGIC;
+
+	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
+	    priv->rx_filer_enable)
+		priv->wol_supported |= GFAR_WOL_FILER_UCAST;
+
+	device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
 
 	/* fill out IRQ number and name fields */
 	for (i = 0; i < priv->num_grps; i++) {
@@ -1479,15 +1488,122 @@
 
 #ifdef CONFIG_PM
 
+static void __gfar_filer_disable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 temp;
+
+	temp = gfar_read(&regs->rctrl);
+	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
+	gfar_write(&regs->rctrl, temp);
+}
+
+static void __gfar_filer_enable(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 temp;
+
+	temp = gfar_read(&regs->rctrl);
+	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
+	gfar_write(&regs->rctrl, temp);
+}
+
+/* Filer rules implementing wol capabilities */
+static void gfar_filer_config_wol(struct gfar_private *priv)
+{
+	unsigned int i;
+	u32 rqfcr;
+
+	__gfar_filer_disable(priv);
+
+	/* clear the filer table, reject any packet by default */
+	rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
+	for (i = 0; i <= MAX_FILER_IDX; i++)
+		gfar_write_filer(priv, i, rqfcr, 0);
+
+	i = 0;
+	if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
+		/* unicast packet, accept it */
+		struct net_device *ndev = priv->ndev;
+		/* get the default rx queue index */
+		u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
+		u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
+				    (ndev->dev_addr[1] << 8) |
+				     ndev->dev_addr[2];
+
+		rqfcr = (qindex << 10) | RQFCR_AND |
+			RQFCR_CMP_EXACT | RQFCR_PID_DAH;
+
+		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+
+		dest_mac_addr = (ndev->dev_addr[3] << 16) |
+				(ndev->dev_addr[4] << 8) |
+				 ndev->dev_addr[5];
+		rqfcr = (qindex << 10) | RQFCR_GPI |
+			RQFCR_CMP_EXACT | RQFCR_PID_DAL;
+		gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
+	}
+
+	__gfar_filer_enable(priv);
+}
+
+static void gfar_filer_restore_table(struct gfar_private *priv)
+{
+	u32 rqfcr, rqfpr;
+	unsigned int i;
+
+	__gfar_filer_disable(priv);
+
+	for (i = 0; i <= MAX_FILER_IDX; i++) {
+		rqfcr = priv->ftp_rqfcr[i];
+		rqfpr = priv->ftp_rqfpr[i];
+		gfar_write_filer(priv, i, rqfcr, rqfpr);
+	}
+
+	__gfar_filer_enable(priv);
+}
+
+/* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
+static void gfar_start_wol_filer(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	u32 tempval;
+	int i = 0;
+
+	/* Enable Rx hw queues */
+	gfar_write(&regs->rqueue, priv->rqueue);
+
+	/* Initialize DMACTRL to have WWR and WOP */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval |= DMACTRL_INIT_SETTINGS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	/* Make sure we aren't stopped */
+	tempval = gfar_read(&regs->dmactrl);
+	tempval &= ~DMACTRL_GRS;
+	gfar_write(&regs->dmactrl, tempval);
+
+	for (i = 0; i < priv->num_grps; i++) {
+		regs = priv->gfargrp[i].regs;
+		/* Clear RHLT, so that the DMA starts polling now */
+		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
+		/* enable the Filer General Purpose Interrupt */
+		gfar_write(&regs->imask, IMASK_FGPI);
+	}
+
+	/* Enable Rx DMA */
+	tempval = gfar_read(&regs->maccfg1);
+	tempval |= MACCFG1_RX_EN;
+	gfar_write(&regs->maccfg1, tempval);
+}
+
 static int gfar_suspend(struct device *dev)
 {
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
-	int magic_packet = priv->wol_en &&
-			   (priv->device_flags &
-			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	u16 wol = priv->wol_opts;
 
 	if (!netif_running(ndev))
 		return 0;
@@ -1499,7 +1615,7 @@
 
 	gfar_halt(priv);
 
-	if (magic_packet) {
+	if (wol & GFAR_WOL_MAGIC) {
 		/* Enable interrupt on Magic Packet */
 		gfar_write(&regs->imask, IMASK_MAG);
 
@@ -1513,6 +1629,10 @@
 		tempval |= MACCFG1_RX_EN;
 		gfar_write(&regs->maccfg1, tempval);
 
+	} else if (wol & GFAR_WOL_FILER_UCAST) {
+		gfar_filer_config_wol(priv);
+		gfar_start_wol_filer(priv);
+
 	} else {
 		phy_stop(priv->phydev);
 	}
@@ -1526,18 +1646,22 @@
 	struct net_device *ndev = priv->ndev;
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
-	int magic_packet = priv->wol_en &&
-			   (priv->device_flags &
-			    FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+	u16 wol = priv->wol_opts;
 
 	if (!netif_running(ndev))
 		return 0;
 
-	if (magic_packet) {
+	if (wol & GFAR_WOL_MAGIC) {
 		/* Disable Magic Packet mode */
 		tempval = gfar_read(&regs->maccfg2);
 		tempval &= ~MACCFG2_MPEN;
 		gfar_write(&regs->maccfg2, tempval);
+
+	} else if (wol & GFAR_WOL_FILER_UCAST) {
+		/* need to stop rx only, tx is already down */
+		gfar_halt(priv);
+		gfar_filer_restore_table(priv);
+
 	} else {
 		phy_start(priv->phydev);
 	}
@@ -1998,6 +2122,8 @@
 				  gfar_irq(grp, RX)->irq);
 			goto rx_irq_fail;
 		}
+		enable_irq_wake(gfar_irq(grp, RX)->irq);
+
 	} else {
 		err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
 				  gfar_irq(grp, TX)->name, grp);
@@ -2743,7 +2869,14 @@
 {
 	struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
 	unsigned long flags;
-	u32 imask;
+	u32 imask, ievent;
+
+	ievent = gfar_read(&grp->regs->ievent);
+
+	if (unlikely(ievent & IEVENT_FGPI)) {
+		gfar_write(&grp->regs->ievent, IEVENT_FGPI);
+		return IRQ_HANDLED;
+	}
 
 	if (likely(napi_schedule_prep(&grp->napi_rx))) {
 		spin_lock_irqsave(&grp->grplock, flags);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 8c19948..f266b20 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -340,6 +340,7 @@
 #define IEVENT_MAG		0x00000800
 #define IEVENT_GRSC		0x00000100
 #define IEVENT_RXF0		0x00000080
+#define IEVENT_FGPI		0x00000010
 #define IEVENT_FIR		0x00000008
 #define IEVENT_FIQ		0x00000004
 #define IEVENT_DPE		0x00000002
@@ -372,6 +373,7 @@
 #define IMASK_MAG		0x00000800
 #define IMASK_GRSC              0x00000100
 #define IMASK_RXFEN0		0x00000080
+#define IMASK_FGPI		0x00000010
 #define IMASK_FIR		0x00000008
 #define IMASK_FIQ		0x00000004
 #define IMASK_DPE		0x00000002
@@ -540,6 +542,9 @@
 
 #define GFAR_INT_NAME_MAX	(IFNAMSIZ + 6)	/* '_g#_xx' */
 
+#define GFAR_WOL_MAGIC		0x00000001
+#define GFAR_WOL_FILER_UCAST	0x00000002
+
 struct txbd8
 {
 	union {
@@ -917,6 +922,7 @@
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING		0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING	0x00000400
 #define FSL_GIANFAR_DEV_HAS_TIMER		0x00000800
+#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER	0x00001000
 
 #if (MAXGROUPS == 2)
 #define DEFAULT_MAPPING 	0xAA
@@ -1161,8 +1167,6 @@
 		extended_hash:1,
 		bd_stash_en:1,
 		rx_filer_enable:1,
-		/* Wake-on-LAN enabled */
-		wol_en:1,
 		/* Enable priority based Tx scheduling in Hw */
 		prio_sched_en:1,
 		/* Flow control flags */
@@ -1191,6 +1195,10 @@
 	u32 __iomem *hash_regs[16];
 	int hash_width;
 
+	/* wake-on-lan settings */
+	u16 wol_opts;
+	u16 wol_supported;
+
 	/*Filer table*/
 	unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
 	unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 6bdc891..928ca2b 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -182,8 +182,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 
@@ -644,28 +642,49 @@
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
-		wol->supported = WAKE_MAGIC;
-		wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
-	} else {
-		wol->supported = wol->wolopts = 0;
-	}
+	wol->supported = 0;
+	wol->wolopts = 0;
+
+	if (priv->wol_supported & GFAR_WOL_MAGIC)
+		wol->supported |= WAKE_MAGIC;
+
+	if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
+		wol->supported |= WAKE_UCAST;
+
+	if (priv->wol_opts & GFAR_WOL_MAGIC)
+		wol->wolopts |= WAKE_MAGIC;
+
+	if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
+		wol->wolopts |= WAKE_UCAST;
 }
 
 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct gfar_private *priv = netdev_priv(dev);
+	u16 wol_opts = 0;
+	int err;
 
-	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
-	    wol->wolopts != 0)
+	if (!priv->wol_supported && wol->wolopts)
 		return -EINVAL;
 
-	if (wol->wolopts & ~WAKE_MAGIC)
+	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
 		return -EINVAL;
 
-	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+	if (wol->wolopts & WAKE_MAGIC) {
+		wol_opts |= GFAR_WOL_MAGIC;
+	} else {
+		if (wol->wolopts & WAKE_UCAST)
+			wol_opts |= GFAR_WOL_FILER_UCAST;
+	}
 
-	priv->wol_en = !!device_may_wakeup(&dev->dev);
+	wol_opts &= priv->wol_supported;
+	priv->wol_opts = 0;
+
+	err = device_set_wakeup_enable(priv->dev, wol_opts);
+	if (err)
+		return err;
+
+	priv->wol_opts = wol_opts;
 
 	return 0;
 }
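
For context, these WAKE_MAGIC/WAKE_UCAST bits arrive from userspace through the
standard SIOCETHTOOL ioctl (the path that lands in gfar_set_wol() above). A
minimal, self-contained userspace sketch of that request, with "eth0" as a
placeholder interface name; it is equivalent to `ethtool -s eth0 wol u`:

/* Request wake-on-unicast via the ethtool ioctl. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_SWOL, .wolopts = WAKE_UCAST };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");	/* fails if the driver lacks WAKE_UCAST */
	close(fd);
	return 0;
}
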
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index cc83350..89714f5 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -351,8 +351,6 @@
 	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
-	drvinfo->eedump_len = 0;
-	drvinfo->regdump_len = uec_get_regs_len(netdev);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 165b5a8..f250dec 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
 	bool "Hisilicon devices"
 	default y
-	depends on ARM || ARM64
+	depends on OF && (ARM || ARM64 || COMPILE_TEST)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -24,7 +24,6 @@
 
 config HIP04_ETH
 	tristate "HISILICON P04 Ethernet support"
-	select PHYLIB
 	select MARVELL_PHY
 	select MFD_SYSCON
 	select HNS_MDIO
@@ -33,8 +32,8 @@
 	  want to use the internal ethernet then you should answer Y to this.
 
 config HNS_MDIO
-	tristate "Hisilicon HNS MDIO device Support"
-	select MDIO
+	tristate
+	select PHYLIB
 	---help---
 	  This selects the HNS MDIO support. It is needed by HNS_DSAF to access
 	  the PHY
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4..253f8ed 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@
 	struct net_device *ndev;
 	struct hip04_priv *priv;
 	struct resource *res;
-	unsigned int irq;
+	int irq;
 	int ret;
 
 	ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index a5e077e..e51892d 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -371,7 +371,7 @@
 
 static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
 {
-	writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+	writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
 	writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
 }
 
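
The (u32) cast added above matters because of C integer promotion: if the
BITS_* constants are plain-int expressions, ~(BITS_RX_EN | BITS_TX_EN) is a
negative int and sign-extends on contact with a wider type. A standalone
demonstration (the mask values here are assumptions; the promotion behavior
is not):

#include <stdio.h>
#include <stdint.h>

#define BITS_RX_EN (1 << 1)	/* plain-int constants, as in many headers */
#define BITS_TX_EN (1 << 2)

int main(void)
{
	uint64_t no_cast   = ~(BITS_RX_EN | BITS_TX_EN);	    /* int -7, sign-extended */
	uint64_t with_cast = ~(uint32_t)(BITS_RX_EN | BITS_TX_EN); /* stays 32-bit */

	printf("without cast: 0x%llx\n", (unsigned long long)no_cast);   /* 0xfffffffffffffff9 */
	printf("with cast:    0x%llx\n", (unsigned long long)with_cast); /* 0xfffffff9 */
	return 0;
}
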
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 0a0a9e8..b364529 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -436,60 +436,10 @@
 }
 EXPORT_SYMBOL(hnae_ae_unregister);
 
-static ssize_t handles_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	ssize_t s = 0;
-	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
-	struct hnae_handle *h;
-	int i = 0, j;
-
-	list_for_each_entry_rcu(h, &hdev->handle_list, node) {
-		s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n",
-			    i++, h->eport_id, h->dev->name);
-		for (j = 0; j < h->q_num; j++) {
-			s += sprintf(buf + s, "\tqueue[%d] on 0x%llx\n",
-				     j, (u64)h->qs[i]->io_base);
-#define HANDEL_TX_MSG "\t\ttx_ring on 0x%llx:%u,%u,%u,%u,%u,%llu,%llu\n"
-			s += sprintf(buf + s,
-				     HANDEL_TX_MSG,
-				     (u64)h->qs[i]->tx_ring.io_base,
-				     h->qs[i]->tx_ring.buf_size,
-				     h->qs[i]->tx_ring.desc_num,
-				     h->qs[i]->tx_ring.max_desc_num_per_pkt,
-				     h->qs[i]->tx_ring.max_raw_data_sz_per_desc,
-				     h->qs[i]->tx_ring.max_pkt_size,
-				 h->qs[i]->tx_ring.stats.sw_err_cnt,
-				 h->qs[i]->tx_ring.stats.io_err_cnt);
-			s += sprintf(buf + s,
-				"\t\trx_ring on 0x%llx:%u,%u,%llu,%llu,%llu\n",
-				(u64)h->qs[i]->rx_ring.io_base,
-				h->qs[i]->rx_ring.buf_size,
-				h->qs[i]->rx_ring.desc_num,
-				h->qs[i]->rx_ring.stats.sw_err_cnt,
-				h->qs[i]->rx_ring.stats.io_err_cnt,
-				h->qs[i]->rx_ring.stats.seg_pkt_cnt);
-		}
-	}
-
-	return s;
-}
-
-static DEVICE_ATTR_RO(handles);
-static struct attribute *hnae_class_attrs[] = {
-	&dev_attr_handles.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(hnae_class);
-
 static int __init hnae_init(void)
 {
 	hnae_class = class_create(THIS_MODULE, "hnae");
-	if (IS_ERR(hnae_class))
-		return PTR_ERR(hnae_class);
-
-	hnae_class->dev_groups = hnae_class_groups;
-	return 0;
+	return PTR_ERR_OR_ZERO(hnae_class);
 }
 
 static void __exit hnae_exit(void)
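
PTR_ERR_OR_ZERO(), which the simplified hnae_init() relies on, returns the
encoded errno for an error pointer and 0 otherwise. A rough userspace model
of that contract, assuming the kernel's usual MAX_ERRNO of 4095:

#define MAX_ERRNO 4095

/* IS_ERR(): error pointers live in the top page of the address space */
static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err_or_zero(const void *ptr)
{
	if (is_err(ptr))
		return (long)ptr;	/* PTR_ERR(): recover the negative errno */
	return 0;			/* valid pointer: success */
}
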
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 5edd8cd..cec95ac 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
+#include <linux/phy.h>
 #include <linux/types.h>
 
 #define HNAE_DRIVER_VERSION "1.3.0"
@@ -429,6 +430,7 @@
 	void (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
 	int (*set_coalesce_frames)(struct hnae_handle *handle,
 				   u32 coalesce_frames);
+	void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
 	int (*get_mac_addr)(struct hnae_handle *handle, void **p);
 	int (*set_mac_addr)(struct hnae_handle *handle, void *p);
 	int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a2c72f8..1a16c03 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -392,6 +392,11 @@
 	return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
 }
 
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+	hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+}
+
 static int hns_ae_get_autoneg(struct hnae_handle *handle)
 {
 	u32     auto_neg;
@@ -748,6 +753,7 @@
 	.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
 	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
 	.set_coalesce_frames = hns_ae_set_coalesce_frames,
+	.set_promisc_mode = hns_ae_set_promisc_mode,
 	.set_mac_addr = hns_ae_set_mac_address,
 	.set_mc_addr = hns_ae_set_multicast_one,
 	.set_mtu = hns_ae_set_mtu,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 95bf42a..026b386 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -179,7 +179,7 @@
 			return -EINVAL;
 		}
 	} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
-		if (mac_cb->mac_id <= DSAF_MAX_PORT_NUM_PER_CHIP) {
+		if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
 			dev_err(mac_cb->dev,
 				"input invalid,%s mac%d vmid%d!\n",
 				mac_cb->dsaf_dev->ae_dev.name,
@@ -744,9 +744,11 @@
 	mac_cb->serdes_vaddr = dsaf_dev->sds_base;
 
 	if (dsaf_dev->cpld_base &&
-	    mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+	    mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
 		mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
 			mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
+		cpld_led_reset(mac_cb);
+	}
 	mac_cb->sfp_prsnt = 0;
 	mac_cb->txpkt_for_led = 0;
 	mac_cb->rxpkt_for_led = 0;
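
The first hunk above is a classic off-by-one fix: mac_id indexes a table of
DSAF_MAX_PORT_NUM_PER_CHIP entries, so valid indices are 0..MAX-1 and the
invalid-input branch must catch ">= MAX". In miniature (the port count here
is illustrative):

#include <stdio.h>

#define DSAF_MAX_PORT_NUM_PER_CHIP 6	/* illustrative value */

static const char *check(int mac_id)
{
	if (mac_id < 0 || mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP)
		return "input invalid";
	return "ok";
}

int main(void)
{
	printf("mac_id 5: %s\n", check(5));	/* ok */
	printf("mac_id 6: %s\n", check(6));	/* input invalid */
	return 0;
}
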
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 26ae6c6..2a98eba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -17,6 +17,8 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/device.h>
+#include <linux/vmalloc.h>
+
 #include "hns_dsaf_main.h"
 #include "hns_dsaf_rcb.h"
 #include "hns_dsaf_ppe.h"
@@ -217,6 +219,25 @@
 	}
 }
 
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+	u16 max_q_per_vf, max_vfn;
+	u32 q_id, q_num_per_port;
+	u32 i;
+
+	hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
+			       HNS_DSAF_COMM_SERVICE_NW_IDX,
+			       &max_vfn, &max_q_per_vf);
+	q_num_per_port = max_vfn * max_q_per_vf;
+
+	for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+		dsaf_set_dev_field(dsaf_dev,
+				   DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+				   0xff, 0, q_id);
+		q_id += q_num_per_port;
+	}
+}
+
 /**
  * hns_dsaf_sw_port_type_cfg - cfg sw type
  * @dsaf_id: dsa fabric id
@@ -592,6 +613,11 @@
 	dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
 }
 
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+	dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+}
+
 /**
  * hns_dsaf_tbl_stat_en - tbl
  * @dsaf_id: dsa fabric id
@@ -920,6 +946,9 @@
 	/* set 22 queue per tx ppe engine, only used in switch mode */
 	hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
 
+	/* set promisc def queue id */
+	hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
 	/* in non switch mode, set all port to access mode */
 	hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 315b07e..b2b9348 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -423,5 +423,6 @@
 
 void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
 int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index d611388..523e9b8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -64,17 +64,10 @@
 	switch (status) {
 	case HNAE_LED_ACTIVE:
 		mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
-		return 2;
-	case HNAE_LED_ON:
 		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
 			     CPLD_LED_ON_VALUE);
 		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
-		break;
-	case HNAE_LED_OFF:
-		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
-			     CPLD_LED_DEFAULT_VALUE);
-		dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
-		break;
+		return 2;
 	case HNAE_LED_INACTIVE:
 		dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
 			     CPLD_LED_DEFAULT_VALUE);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 05ea244..4db32c6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -575,8 +575,8 @@
  *@max_vfn : max vfn number
  *@max_q_per_vf:max ring number per vm
  */
-static void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
-				   u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+			    u16 *max_vfn, u16 *max_q_per_vf)
 {
 	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
 		switch (dsaf_mode) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index c7db613..3a2afe2 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -107,6 +107,8 @@
 void hns_rcb_start(struct hnae_queue *q, u32 val);
 void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
 void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+			    u16 *max_vfn, u16 *max_q_per_vf);
 
 void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
 void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index dab5ecf..802d554 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -51,9 +51,9 @@
 	{"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
 	{"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
 
-	{"xgmac_rx_not_well_pkt", MAC_STATS_FIELD_OFF(rx_fragment_err)},
-	{"xgmac_rx_good_well_pkt", MAC_STATS_FIELD_OFF(rx_undersize)},
-	{"xgmac_rx_total_pkt", MAC_STATS_FIELD_OFF(rx_under_min)},
+	{"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+	{"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+	{"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
 	{"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
 	{"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
 	{"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index ce7f2e0..302d3ae 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1161,6 +1161,21 @@
 	}
 }
 
+void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+	struct hnae_handle *h = priv->ae_handle;
+
+	if (h->dev->ops->set_promisc_mode) {
+		if (ndev->flags & IFF_PROMISC)
+			h->dev->ops->set_promisc_mode(h, 1);
+		else
+			h->dev->ops->set_promisc_mode(h, 0);
+	}
+
+	hns_set_multicast_list(ndev);
+}
+
 struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
 					      struct rtnl_link_stats64 *stats)
 {
@@ -1220,7 +1235,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = hns_nic_poll_controller,
 #endif
-	.ndo_set_rx_mode = hns_set_multicast_list,
+	.ndo_set_rx_mode = hns_nic_set_rx_mode,
 };
 
 static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1300,16 +1315,15 @@
 		return;
 
 	hns_nic_dump(priv);
-	netdev_err(priv->netdev, "Reset %s port\n",
-		   (type == HNAE_PORT_DEBUG ? "debug" : "business"));
+	netdev_info(priv->netdev, "Reset %s port\n",
+		    (type == HNAE_PORT_DEBUG ? "debug" : "business"));
 
 	rtnl_lock();
-	if (type == HNAE_PORT_DEBUG) {
+	/* put off any impending NetWatchDogTimeout */
+	priv->netdev->trans_start = jiffies;
+
+	if (type == HNAE_PORT_DEBUG)
 		hns_nic_net_reinit(priv->netdev);
-	} else {
-		hns_nic_net_down(priv->netdev);
-		hns_nic_net_reset(priv->netdev);
-	}
 	rtnl_unlock();
 }
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 2550208..a033212 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -194,9 +194,7 @@
 {
 	struct hns_nic_priv *priv = netdev_priv(net_dev);
 	struct hnae_handle *h;
-	int link_stat;
 	u32 speed;
-	u8 duplex, autoneg;
 
 	if (!netif_running(net_dev))
 		return -ESRCH;
@@ -206,48 +204,35 @@
 		return -ENODEV;
 
 	h = priv->ae_handle;
-	link_stat = hns_nic_get_link(net_dev);
-	duplex = cmd->duplex;
 	speed = ethtool_cmd_speed(cmd);
-	autoneg = cmd->autoneg;
-
-	if (!link_stat) {
-		if (duplex != (u8)DUPLEX_UNKNOWN || speed != (u32)SPEED_UNKNOWN)
-			return -EINVAL;
-
-		if (h->phy_if == PHY_INTERFACE_MODE_SGMII && h->phy_node) {
-			priv->phy->autoneg = autoneg;
-			return phy_start_aneg(priv->phy);
-		}
-	}
 
 	if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
-		if (autoneg != AUTONEG_DISABLE)
-			return -EINVAL;
-
-		if (speed != SPEED_10000 || duplex != DUPLEX_FULL)
+		if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
+		    cmd->duplex != DUPLEX_FULL)
 			return -EINVAL;
 	} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
-		if (!h->phy_node && autoneg != AUTONEG_DISABLE)
+		if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
 			return -EINVAL;
 
-		if (speed == SPEED_1000 && duplex == DUPLEX_HALF)
+		if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
 			return -EINVAL;
+		if (priv->phy)
+			return phy_ethtool_sset(priv->phy, cmd);
 
-		if (speed != SPEED_10 && speed != SPEED_100 &&
-		    speed != SPEED_1000)
+		if ((speed != SPEED_10 && speed != SPEED_100 &&
+		     speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
+		     cmd->duplex != DUPLEX_FULL))
 			return -EINVAL;
 	} else {
 		netdev_err(net_dev, "Not supported!");
 		return -ENOTSUPP;
 	}
 
-	if (priv->phy) {
-		return phy_ethtool_sset(priv->phy, cmd);
-	} else if (h->dev->ops->adjust_link && link_stat) {
-		h->dev->ops->adjust_link(h, speed, duplex);
+	if (h->dev->ops->adjust_link) {
+		h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
 		return 0;
 	}
+
 	netdev_err(net_dev, "Not supported!");
 	return -ENOTSUPP;
 }
@@ -682,7 +667,6 @@
 	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
 
 	strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
-	drvinfo->eedump_len = 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index e4ec52a..37491c8 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -11,6 +11,7 @@
 #include <linux/etherdevice.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/netdevice.h>
@@ -20,6 +21,7 @@
 #include <linux/of_platform.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/spinlock_types.h>
 
 #define MDIO_DRV_NAME "Hi-HNS_MDIO"
@@ -36,7 +38,7 @@
 
 struct hns_mdio_device {
 	void *vbase;		/* mdio reg base address */
-	void *sys_vbase;
+	struct regmap *subctrl_vbase;
 };
 
 /* mdio reg */
@@ -155,10 +157,10 @@
 	u32 time_cnt;
 	u32 reg_value;
 
-	mdio_write_reg((void *)mdio_dev->sys_vbase, cfg_reg, set_val);
+	regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
 	for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-		reg_value = mdio_read_reg((void *)mdio_dev->sys_vbase, st_reg);
+		regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
 		reg_value &= st_msk;
 		if ((!!check_st) == (!!reg_value))
 			break;
@@ -352,7 +354,7 @@
 	struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
 	int ret;
 
-	if (!mdio_dev->sys_vbase) {
+	if (!mdio_dev->subctrl_vbase) {
 		dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n");
 		return -ENODEV;
 	}
@@ -455,13 +457,12 @@
 		return ret;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	mdio_dev->sys_vbase = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(mdio_dev->sys_vbase)) {
-		ret = PTR_ERR(mdio_dev->sys_vbase);
-		return ret;
+	mdio_dev->subctrl_vbase =
+		syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
+	if (IS_ERR(mdio_dev->subctrl_vbase)) {
+		dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
+		mdio_dev->subctrl_vbase = NULL;
 	}
-
 	new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR,
 				    sizeof(int), GFP_KERNEL);
 	if (!new_bus->irq)
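
Instead of ioremapping a second MEM resource, the probe now borrows the shared
sub-control block through syscon, so every user of those registers goes through
one serialized regmap. A kernel-context sketch of the lookup-and-access pattern
(the "subctrl_vbase" phandle name is the one this binding uses):

#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *example_get_subctrl(struct device_node *np)
{
	/* resolve the phandle to the shared syscon's regmap */
	return syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0));
}

/* Accesses then go through the regmap, which serializes all users of the
 * shared sub-control block:
 *
 *	regmap_write(map, cfg_reg, set_val);
 *	regmap_read(map, st_reg, &reg_value);
 */
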
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index b60a34d..5d7db6c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2204,7 +2204,6 @@
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
 		 dev->cell_index, dev->ofdev->dev.of_node->full_name);
-	info->regdump_len = emac_ethtool_get_regs_len(ndev);
 }
 
 static const struct ethtool_ops emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index f379e47..93ae114 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -460,8 +460,8 @@
 	u32 index;
 };
 
-#define EMAC_ETHTOOL_REGS_VER		0
-#define EMAC4_ETHTOOL_REGS_VER		1
-#define EMAC4SYNC_ETHTOOL_REGS_VER	2
+#define EMAC_ETHTOOL_REGS_VER		3
+#define EMAC4_ETHTOOL_REGS_VER		4
+#define EMAC4SYNC_ETHTOOL_REGS_VER	5
 
 #endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 4270ad2..83e557c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -559,8 +559,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = e1000_get_regs_len(netdev);
-	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
 
 static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 74dc150..fd7be86 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3820,7 +3820,7 @@
 	if (work_done < budget) {
 		if (likely(adapter->itr_setting & 3))
 			e1000_set_itr(adapter);
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->flags))
 			e1000_irq_enable(adapter);
 	}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index ad6daa6..6cab1f3 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -648,8 +648,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = e1000_get_regs_len(netdev);
-	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
 
 static void e1000_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 2e2ddec0..0a854a4 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2693,7 +2693,7 @@
 	if (work_done < weight) {
 		if (adapter->itr_setting & 3)
 			e1000_set_itr(adapter);
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 		if (!test_bit(__E1000_DOWN, &adapter->state)) {
 			if (adapter->msix_entries)
 				ew32(IMS, adapter->rx_ring->ims_val);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 08ecf43..5304bc1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -176,7 +176,7 @@
 		return;
 
 	/* Generate a folder for each q_vector */
-	sprintf(name, "q_vector.%03d", q_vector->v_idx);
+	snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
 
 	q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
 	if (!q_vector->dbg_q_vector)
@@ -186,7 +186,7 @@
 	for (i = 0; i < q_vector->tx.count; i++) {
 		struct fm10k_ring *ring = &q_vector->tx.ring[i];
 
-		sprintf(name, "tx_ring.%03d", ring->queue_index);
+		snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);
 
 		debugfs_create_file(name, 0600,
 				    q_vector->dbg_q_vector, ring,
@@ -197,7 +197,7 @@
 	for (i = 0; i < q_vector->rx.count; i++) {
 		struct fm10k_ring *ring = &q_vector->rx.ring[i];
 
-		sprintf(name, "rx_ring.%03d", ring->queue_index);
+		snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);
 
 		debugfs_create_file(name, 0600,
 				    q_vector->dbg_q_vector, ring,
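
The sprintf-to-snprintf conversions bound every format to the destination
buffer; on overflow snprintf truncates and NUL-terminates instead of writing
past the array. A quick standalone demonstration:

#include <stdio.h>

int main(void)
{
	char name[16];
	int n = snprintf(name, sizeof(name), "q_vector.%03d", 12345678);

	/* n is the length snprintf *wanted* to write; >= sizeof(name)
	 * signals truncation rather than an overrun. */
	if (n >= (int)sizeof(name))
		printf("truncated: \"%s\" (needed %d chars)\n", name, n);
	return 0;
}
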
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4ef2fbd..2ce0eba 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -206,13 +206,13 @@
 	}
 
 	for (i = 0; i < interface->hw.mac.max_queues; i++) {
-		sprintf(p, "tx_queue_%u_packets", i);
+		snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
 		p += ETH_GSTRING_LEN;
-		sprintf(p, "tx_queue_%u_bytes", i);
+		snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
 		p += ETH_GSTRING_LEN;
-		sprintf(p, "rx_queue_%u_packets", i);
+		snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
 		p += ETH_GSTRING_LEN;
-		sprintf(p, "rx_queue_%u_bytes", i);
+		snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
 		p += ETH_GSTRING_LEN;
 	}
 }
@@ -515,10 +515,6 @@
 		sizeof(info->version) - 1);
 	strncpy(info->bus_info, pci_name(interface->pdev),
 		sizeof(info->bus_info) - 1);
-
-	info->n_stats = fm10k_get_sset_count(dev, ETH_SS_STATS);
-
-	info->regdump_len = fm10k_get_regs_len(dev);
 }
 
 static void fm10k_get_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 2f47bfe..e76a44c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -593,9 +593,9 @@
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
-			       struct fm10k_ring *rx_ring,
-			       int budget)
+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
+			      struct fm10k_ring *rx_ring,
+			      int budget)
 {
 	struct sk_buff *skb = rx_ring->skb;
 	unsigned int total_bytes = 0, total_packets = 0;
@@ -662,7 +662,7 @@
 	q_vector->rx.total_packets += total_packets;
 	q_vector->rx.total_bytes += total_bytes;
 
-	return total_packets < budget;
+	return total_packets;
 }
 
 #define VXLAN_HLEN (sizeof(struct udphdr) + 8)
@@ -1422,7 +1422,7 @@
 	struct fm10k_q_vector *q_vector =
 			       container_of(napi, struct fm10k_q_vector, napi);
 	struct fm10k_ring *ring;
-	int per_ring_budget;
+	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
 	fm10k_for_each_ring(ring, q_vector->tx)
@@ -1436,16 +1436,19 @@
 	else
 		per_ring_budget = budget;
 
-	fm10k_for_each_ring(ring, q_vector->rx)
-		clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
-						     per_ring_budget);
+	fm10k_for_each_ring(ring, q_vector->rx) {
+		int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+		work_done += work;
+		clean_complete &= !!(work < per_ring_budget);
+	}
 
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
 
 	/* all work done, exit the polling mode */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 
 	/* re-enable the q_vector */
 	fm10k_qv_enable(q_vector);
@@ -1905,7 +1908,7 @@
 	u32 reta, base;
 
 	/* If the netdev is initialized we have to maintain table if possible */
-	if (interface->netdev->reg_state) {
+	if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
 		for (i = FM10K_RETA_SIZE; i--;) {
 			reta = interface->reta[i];
 			if ((((reta << 24) >> 24) < rss_i) &&
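
The fm10k poll rework above is the common NAPI accounting idiom: each ring
reports how many packets it actually cleaned, the totals are summed, and
napi_complete_done() receives the real work count so interrupt moderation and
busy polling can use it. A skeleton of the idiom (kernel context assumed; ring
iteration, per-ring budget split, and IRQ re-arm elided):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool clean_complete = true;

	/* for each rx ring:
	 *	work = clean_rx_irq(..., per_ring_budget);
	 *	work_done += work;
	 *	clean_complete &= (work < per_ring_budget);
	 */

	if (!clean_complete)
		return budget;			/* keep polling */

	napi_complete_done(napi, work_done);	/* report the real work count */
	/* re-enable the vector's interrupt here */
	return 0;
}
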
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c64d18d..4dd3e26 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -71,7 +71,6 @@
 #define I40E_MAX_VEB          16
 
 #define I40E_MAX_NUM_DESCRIPTORS      4096
-#define I40E_MAX_REGISTER     0x800000
 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS  512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE  32
@@ -94,19 +93,26 @@
 #endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   256
-#define I40E_AQ_WORK_LIMIT            32
+#define I40E_AQ_WORK_LIMIT            66 /* max number of VFs + a little */
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT   10
-#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
+#define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 16)
 
 /* Ethtool Private Flags */
 #define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG	BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR		BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS	BIT(3)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
 #define I40E_NVM_VERSION_HI_SHIFT  12
 #define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_OEM_VER_BUILD_MASK    0xffff
+#define I40E_OEM_VER_PATCH_MASK    0xff
+#define I40E_OEM_VER_BUILD_SHIFT   8
+#define I40E_OEM_VER_SHIFT         24
 
 /* The values in here are decimal coded as hex as is the case in the NVM map*/
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -304,7 +310,6 @@
 #ifdef I40E_FCOE
 #define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL			BIT_ULL(12)
 #define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
@@ -326,8 +331,11 @@
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT_ULL(33)
 #define I40E_FLAG_128_QP_RSS_CAPABLE		BIT_ULL(34)
 #define I40E_FLAG_WB_ON_ITR_CAPABLE		BIT_ULL(35)
+#define I40E_FLAG_VEB_STATS_ENABLED		BIT_ULL(37)
 #define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(38)
+#define I40E_FLAG_LINK_POLLING_ENABLED		BIT_ULL(39)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
+#define I40E_FLAG_NO_PCI_LINK_CHECK		BIT_ULL(42)
 
 	/* tracks features that get auto disabled by errors */
 	u64 auto_disable_flags;
@@ -408,6 +416,9 @@
 	/* These are only valid in NPAR modes */
 	u32 npar_max_bw;
 	u32 npar_min_bw;
+
+	u32 ioremap_len;
+	u32 fd_inv;
 };
 
 struct i40e_mac_filter {
@@ -459,6 +470,8 @@
 #define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
 
+	/* Per VSI lock to protect elements/list (MAC filter) */
+	spinlock_t mac_filter_list_lock;
 	struct list_head mac_filter_list;
 
 	/* VSI stats */
@@ -473,6 +486,7 @@
 #endif
 	u32 tx_restart;
 	u32 tx_busy;
+	u64 tx_linearize;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 
@@ -488,6 +502,7 @@
 	 */
 	u16 rx_itr_setting;
 	u16 tx_itr_setting;
+	u16 int_rate_limit;  /* value in usecs */
 
 	u16 rss_table_size;
 	u16 rss_size;
@@ -533,6 +548,7 @@
 	u16 idx;               /* index in pf->vsi[] */
 	u16 veb_idx;           /* index of VEB parent */
 	struct kobject *kobj;  /* sysfs object */
+	bool current_isup;     /* Sync 'link up' logging */
 
 	/* VSI specific handlers */
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -563,6 +579,8 @@
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
 	bool arm_wb_state;
+#define ITR_COUNTDOWN_START 100
+	u8 itr_countdown;	/* when 0 should adjust ITR */
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -572,22 +590,29 @@
 };
 
 /**
- * i40e_fw_version_str - format the FW and NVM version strings
+ * i40e_nvm_version_str - format the NVM version string
  * @hw: ptr to the hardware info
  **/
-static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
 {
 	static char buf[32];
+	u32 full_ver;
+	u8 ver, patch;
+	u16 build;
+
+	full_ver = hw->nvm.oem_ver;
+	ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
+	build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT)
+		 & I40E_OEM_VER_BUILD_MASK);
+	patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
 
 	snprintf(buf, sizeof(buf),
-		 "f%d.%d.%05d a%d.%d n%x.%02x e%x",
-		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
-		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
+		 "%x.%02x 0x%x %d.%d.%d",
 		 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
 			I40E_NVM_VERSION_HI_SHIFT,
 		 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
 			I40E_NVM_VERSION_LO_SHIFT,
-		 (hw->nvm.eetrack & 0xffffff));
+		 hw->nvm.eetrack, ver, build, patch);
 
 	return buf;
 }
@@ -699,7 +724,24 @@
 static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector, without base_vector
+ **/
+static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	u32 val;
+
+	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
+	/* skip the flush */
+}
+
 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
@@ -738,7 +780,7 @@
 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
 void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
 void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
-int i40e_init_pf_fcoe(struct i40e_pf *pf);
+void i40e_init_pf_fcoe(struct i40e_pf *pf);
 int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
 void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
 int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
@@ -770,4 +812,5 @@
 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
 #endif /* _I40E_H_ */
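
The new i40e_nvm_version_str() unpacks hw->nvm.oem_ver as bits [31:24] =
major, [23:8] = build, [7:0] = patch. A worked decode with a made-up sample
value:

#include <stdio.h>
#include <stdint.h>

#define I40E_OEM_VER_SHIFT       24
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_BUILD_MASK  0xffff
#define I40E_OEM_VER_PATCH_MASK  0xff

int main(void)
{
	uint32_t full_ver = 0x01002305;	/* hypothetical NVM contents */
	uint8_t  ver   = full_ver >> I40E_OEM_VER_SHIFT;
	uint16_t build = (full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
			 I40E_OEM_VER_BUILD_MASK;
	uint8_t  patch = full_ver & I40E_OEM_VER_PATCH_MASK;

	printf("oem %u.%u.%u\n", (unsigned)ver, (unsigned)build,
	       (unsigned)patch);	/* oem 1.35.5 */
	return 0;
}
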
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 199275d..0ff8f01 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -386,7 +386,6 @@
 
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
-	hw->aq.asq.count = hw->aq.num_asq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -404,6 +403,7 @@
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -445,7 +445,6 @@
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
-	hw->aq.arq.count = hw->aq.num_arq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -463,6 +462,7 @@
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -482,8 +482,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.asq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_asq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.asq.head, 0);
@@ -492,16 +496,13 @@
 	wr32(hw, hw->aq.asq.bal, 0);
 	wr32(hw, hw->aq.asq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.asq_mutex);
-
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
 
+shutdown_asq_out:
 	mutex_unlock(&hw->aq.asq_mutex);
-
 	return ret_code;
 }
 
@@ -515,8 +516,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.arq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_arq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.arq.head, 0);
@@ -525,16 +530,13 @@
 	wr32(hw, hw->aq.arq.bal, 0);
 	wr32(hw, hw->aq.arq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.arq_mutex);
-
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
 
+shutdown_arq_out:
 	mutex_unlock(&hw->aq.arq_mutex);
-
 	return ret_code;
 }
 
@@ -551,8 +553,9 @@
  **/
 i40e_status i40e_init_adminq(struct i40e_hw *hw)
 {
-	i40e_status ret_code;
+	u16 cfg_ptr, oem_hi, oem_lo;
 	u16 eetrack_lo, eetrack_hi;
+	i40e_status ret_code;
 	int retry = 0;
 
 	/* verify input for valid configuration */
@@ -611,6 +614,12 @@
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
 	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
 	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+			   &oem_hi);
+	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+			   &oem_lo);
+	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
 	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
 		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@ -681,8 +690,7 @@
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "%s: ntc %d head %d.\n", __func__, ntc,
-			   rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
@@ -745,19 +753,23 @@
 	u16  retval = 0;
 	u32  val = 0;
 
-	val = rd32(hw, hw->aq.asq.head);
-	if (val >= hw->aq.num_asq_entries) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
-	}
+	mutex_lock(&hw->aq.asq_mutex);
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
 		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
+		goto asq_send_command_error;
+	}
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -782,8 +794,6 @@
 	desc->flags &= ~cpu_to_le16(details->flags_dis);
 	desc->flags |= cpu_to_le16(details->flags_ena);
 
-	mutex_lock(&hw->aq.asq_mutex);
-
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
@@ -907,7 +917,6 @@
 
 asq_send_command_error:
 	mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
 	return status;
 }
 
@@ -953,6 +962,13 @@
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
 	/* set next_to_use to head */
 	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
 	if (ntu == ntc) {
@@ -1014,6 +1030,8 @@
 	/* Set pending if needed, unlock and return */
 	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
 	mutex_unlock(&hw->aq.arq_mutex);
 
 	if (i40e_is_nvm_update_op(&e->desc)) {
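
The adminq changes in this file share one theme: the count check ("is this
queue initialized?") moves under the mutex, so the check and the subsequent
ring access form one critical section and cannot race with a concurrent
shutdown zeroing count in between. A userspace model of why the ordering
matters:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_mutex = PTHREAD_MUTEX_INITIALIZER;
static int q_count;	/* 0 means "queue not initialized" */

static int send_command(void)
{
	int ret = 0;

	pthread_mutex_lock(&q_mutex);	/* take the lock *before* the check */
	if (q_count == 0) {
		ret = -1;		/* I40E_ERR_NOT_READY analogue */
		goto out;
	}
	/* ... touch the ring safely here; shutdown must wait for us ... */
out:
	pthread_mutex_unlock(&q_mutex);
	return ret;
}

int main(void)
{
	printf("uninitialized: %d\n", send_command());
	q_count = 32;
	printf("initialized:   %d\n", send_command());
	return 0;
}
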
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index b67b34c..12fbbdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -109,9 +109,10 @@
 
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code; can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
@@ -143,8 +144,9 @@
 	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 		return -EAGAIN;
 
-	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
+
 	return aq_to_posix[aq_rc];
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 95d23bf..6584b6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1722,11 +1722,13 @@
 	u8	phy_type;    /* i40e_aq_phy_type   */
 	u8	link_speed;  /* i40e_aq_link_speed */
 	u8	link_info;
-#define I40E_AQ_LINK_UP			0x01
+#define I40E_AQ_LINK_UP			0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION	0x01
 #define I40E_AQ_LINK_FAULT		0x02
 #define I40E_AQ_LINK_FAULT_TX		0x04
 #define I40E_AQ_LINK_FAULT_RX		0x08
 #define I40E_AQ_LINK_FAULT_REMOTE	0x10
+#define I40E_AQ_LINK_UP_PORT		0x20
 #define I40E_AQ_MEDIA_AVAILABLE		0x40
 #define I40E_AQ_SIGNAL_DETECT		0x80
 	u8	an_info;
@@ -2062,6 +2064,7 @@
 #define I40E_AQC_CEE_APP_ISCSI_MASK	(0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
 #define I40E_AQC_CEE_APP_FIP_SHIFT	0x8
 #define I40E_AQC_CEE_APP_FIP_MASK	(0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
 #define I40E_AQC_CEE_PG_STATUS_SHIFT	0x0
 #define I40E_AQC_CEE_PG_STATUS_MASK	(0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
 #define I40E_AQC_CEE_PFC_STATUS_SHIFT	0x3
@@ -2070,10 +2073,19 @@
 #define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
 #define I40E_AQC_CEE_FCOE_STATUS_SHIFT	0x8
 #define I40E_AQC_CEE_FCOE_STATUS_MASK	(0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
-#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xB
 #define I40E_AQC_CEE_ISCSI_STATUS_MASK	(0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
 #define I40E_AQC_CEE_FIP_STATUS_SHIFT	0x10
 #define I40E_AQC_CEE_FIP_STATUS_MASK	(0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct.  Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
 	u8	reserved1;
 	u8	oper_num_tc;
@@ -2081,9 +2093,9 @@
 	u8	reserved2;
 	u8	oper_tc_bw[8];
 	u8	oper_pfc_en;
-	u8	reserved3;
+	u8	reserved3[2];
 	__le16	oper_app_prio;
-	u8	reserved4;
+	u8	reserved4[2];
 	__le16	tlv_status;
 };
 
@@ -2120,6 +2132,13 @@
 struct i40e_aqc_lldp_set_local_mib {
 #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT	0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	(1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK	(1 << \
+					SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB	0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT	(1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK	(1 << \
+				SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS		0x1
 	u8	type;
 	u8	reserved0;
 	__le16	length;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 114dc64..2d74c6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -51,7 +51,9 @@
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
 		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
 		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_SFP_X722:
@@ -85,7 +87,7 @@
  * @hw: pointer to the HW structure
  * @aq_err: the AQ error code to convert
  **/
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 {
 	switch (aq_err) {
 	case I40E_AQ_RC_OK:
@@ -145,7 +147,7 @@
  * @hw: pointer to the HW structure
  * @stat_err: the status error code to convert
  **/
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 {
 	switch (stat_err) {
 	case 0:
@@ -329,25 +331,11 @@
 			len = buf_len;
 		/* write the full 16-byte chunks */
 		for (i = 0; i < (len - 16); i += 16)
-			i40e_debug(hw, mask,
-				   "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
-				   i, buf[i], buf[i + 1], buf[i + 2],
-				   buf[i + 3], buf[i + 4], buf[i + 5],
-				   buf[i + 6], buf[i + 7], buf[i + 8],
-				   buf[i + 9], buf[i + 10], buf[i + 11],
-				   buf[i + 12], buf[i + 13], buf[i + 14],
-				   buf[i + 15]);
+			i40e_debug(hw, mask, "\t0x%04X  %16ph\n", i, buf + i);
 		/* write whatever's left over without overrunning the buffer */
-		if (i < len) {
-			char d_buf[80];
-			int j = 0;
-
-			memset(d_buf, 0, sizeof(d_buf));
-			j += sprintf(d_buf, "\t0x%04X ", i);
-			while (i < len)
-				j += sprintf(&d_buf[j], " %02X", buf[i++]);
-			i40e_debug(hw, mask, "%s\n", d_buf);
-		}
+		if (i < len)
+			i40e_debug(hw, mask, "\t0x%04X  %*ph\n",
+					     i, len - i, buf + i);
 	}
 }
 
@@ -441,9 +429,6 @@
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
 	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
 
 	return status;
@@ -518,8 +503,6 @@
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
 
 	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
 
@@ -961,6 +944,9 @@
 	else
 		hw->pf_id = (u8)(func_rid & 0x7);
 
+	if (hw->mac.type == I40E_MAC_X722)
+		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
 	status = i40e_init_nvm(hw);
 	return status;
 }
@@ -1038,7 +1024,7 @@
 	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
 
 	if (flags & I40E_AQC_LAN_ADDR_VALID)
-		memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+		ether_addr_copy(mac_addr, addrs.pf_lan_mac);
 
 	return status;
 }
@@ -1061,7 +1047,7 @@
 		return status;
 
 	if (flags & I40E_AQC_PORT_ADDR_VALID)
-		memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+		ether_addr_copy(mac_addr, addrs.port_mac);
 	else
 		status = I40E_ERR_INVALID_MAC_ADDR;
 
@@ -1119,7 +1105,7 @@
 		return status;
 
 	if (flags & I40E_AQC_SAN_ADDR_VALID)
-		memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+		ether_addr_copy(mac_addr, addrs.pf_san_mac);
 	else
 		status = I40E_ERR_INVALID_MAC_ADDR;
 
@@ -1260,7 +1246,7 @@
 	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
 		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
 		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
-	for (cnt = 0; cnt < grst_del + 2; cnt++) {
+	for (cnt = 0; cnt < grst_del + 10; cnt++) {
 		reg = rd32(hw, I40E_GLGEN_RSTAT);
 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
 			break;
@@ -1620,6 +1606,9 @@
 	if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
 		status = I40E_ERR_UNKNOWN_PHY;
 
+	if (report_init)
+		hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
+
 	return status;
 }
 
@@ -1720,14 +1709,14 @@
 			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
 	}
 	/* Update the link info */
-	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+	status = i40e_update_link_info(hw);
 	if (status) {
 		/* Wait a little bit (on 40G cards it sometimes takes a really
 		 * long time for link to come back from the atomic reset)
 		 * and try once more
 		 */
 		msleep(1000);
-		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+		status = i40e_update_link_info(hw);
 	}
 	if (status)
 		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
@@ -2238,27 +2227,54 @@
 /**
  * i40e_get_link_status - get status of the HW network link
  * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
  *
- * Returns true if link is up, false if link is down.
+ * Variable link_up is set true if link is up, false if link is down.
+ * The value of link_up is invalid if the returned status is non-zero.
  *
  * Side effect: LinkStatusEvent reporting becomes enabled
  **/
-bool i40e_get_link_status(struct i40e_hw *hw)
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 {
 	i40e_status status = 0;
-	bool link_status = false;
 
 	if (hw->phy.get_link_info) {
-		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+		status = i40e_update_link_info(hw);
 
 		if (status)
-			goto i40e_get_link_status_exit;
+			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+				   status);
 	}
 
-	link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
 
-i40e_get_link_status_exit:
-	return link_status;
+	return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw)
+{
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	i40e_status status = 0;
+
+	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+	if (status)
+		return status;
+
+	if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+		status = i40e_aq_get_phy_capabilities(hw, false, false,
+						      &abilities, NULL);
+		if (status)
+			return status;
+
+		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+		       sizeof(hw->phy.link_info.module_type));
+	}
+
+	return status;
 }
 
 /**
@@ -2365,6 +2381,7 @@
 		*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
 	if (floating) {
 		u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+
 		if (flags & I40E_AQC_ADD_VEB_FLOATING)
 			*floating = true;
 		else
@@ -3779,7 +3796,7 @@
 	}
 
 	if (mac_addr)
-		memcpy(cmd->mac, mac_addr, ETH_ALEN);
+		ether_addr_copy(cmd->mac, mac_addr);
 
 	cmd->etype = cpu_to_le16(ethtype);
 	cmd->flags = cpu_to_le16(flags);
@@ -3798,6 +3815,28 @@
 }
 
 /**
+ * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid to add ethertype filter from
+ **/
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+						    u16 seid)
+{
+	u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
+		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
+		   I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
+	u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
+	i40e_status status;
+
+	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
+						       seid, 0, true, NULL,
+						       NULL);
+	if (status)
+		hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
+}
+
+/**
  * i40e_aq_alternate_read
  * @hw: pointer to the hardware structure
  * @reg_addr0: address of first dword to be read
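
The hex-dump simplification in i40e_debug_aq() leans on %ph, a printk()-only
extension (not standard printf): "%16ph" prints 16 bytes from the buffer, and
"%*ph" takes the byte count from the argument list, capped at 64. A sketch in
kernel context:

/* Kernel-only: %ph is a printk() extension, not standard printf. */
u8 buf[20] = { 0xde, 0xad, 0xbe, 0xef };

pr_debug("head: %16ph\n", buf);		/* first 16 bytes as hex */
pr_debug("tail: %*ph\n", 4, buf + 16);	/* trailing 4 bytes */
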
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 90de46a..2691277 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -292,6 +292,190 @@
 }
 
 /**
+ * i40e_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ **/
+static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
+				     struct i40e_dcbx_config *dcbcfg)
+{
+	struct i40e_dcb_ets_config *etscfg;
+	u8 *buf = tlv->tlvinfo;
+	u16 offset = 0;
+	u8 priority;
+	int i;
+
+	etscfg = &dcbcfg->etscfg;
+
+	if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+		etscfg->willing = 1;
+
+	etscfg->cbs = 0;
+	/* Priority Group Table (4 octets)
+	 * Octets:|    1    |    2    |    3    |    4    |
+	 *        -----------------------------------------
+	 *        |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+	 *        -----------------------------------------
+	 *   Bits:|7  4|3  0|7  4|3  0|7  4|3  0|7  4|3  0|
+	 *        -----------------------------------------
+	 */
+	for (i = 0; i < 4; i++) {
+		priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
+				 I40E_CEE_PGID_PRIO_1_SHIFT);
+		etscfg->prioritytable[i * 2] =  priority;
+		priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
+				 I40E_CEE_PGID_PRIO_0_SHIFT);
+		etscfg->prioritytable[i * 2 + 1] = priority;
+		offset++;
+	}
+
+	/* PG Percentage Table (8 octets)
+	 * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+	 *        ---------------------------------
+	 *        |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+	 *        ---------------------------------
+	 */
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+		etscfg->tcbwtable[i] = buf[offset++];
+
+	/* Number of TCs supported (1 octet) */
+	etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * i40e_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ **/
+static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
+				      struct i40e_dcbx_config *dcbcfg)
+{
+	u8 *buf = tlv->tlvinfo;
+
+	if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
+		dcbcfg->pfc.willing = 1;
+
+	/* ------------------------
+	 * | PFC Enable | PFC TCs |
+	 * ------------------------
+	 * | 1 octet    | 1 octet |
+	 */
+	dcbcfg->pfc.pfcenable = buf[0];
+	dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * i40e_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ **/
+static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
+				   struct i40e_dcbx_config *dcbcfg)
+{
+	u16 length, typelength, offset = 0;
+	struct i40e_cee_app_prio *app;
+	u8 i, up, selector;
+
+	typelength = ntohs(tlv->hdr.typelen);
+	length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+		       I40E_LLDP_TLV_LEN_SHIFT);
+
+	dcbcfg->numapps = length / sizeof(*app);
+	if (!dcbcfg->numapps)
+		return;
+
+	for (i = 0; i < dcbcfg->numapps; i++) {
+		app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
+		for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
+			if (app->prio_map & BIT(up))
+				break;
+		}
+		dcbcfg->app[i].priority = up;
+
+		/* Get Selector from lower 2 bits, and convert to IEEE */
+		selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK);
+		if (selector == I40E_CEE_APP_SEL_ETHTYPE)
+			dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+		else if (selector == I40E_CEE_APP_SEL_TCPIP)
+			dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+		else
+			/* Keep selector as it is for unknown types */
+			dcbcfg->app[i].selector = selector;
+
+		dcbcfg->app[i].protocolid = ntohs(app->protocol);
+		/* Move to next app */
+		offset += sizeof(*app);
+	}
+}
+
+/**
+ * i40e_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to the parsing function
+ * for that subtype value
+ **/
+static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
+			       struct i40e_dcbx_config *dcbcfg)
+{
+	u16 len, tlvlen, sublen, typelength;
+	struct i40e_cee_feat_tlv *sub_tlv;
+	u8 subtype, feat_tlv_count = 0;
+	u32 ouisubtype;
+
+	ouisubtype = ntohl(tlv->ouisubtype);
+	subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
+		       I40E_LLDP_TLV_SUBTYPE_SHIFT);
+	/* Return if not CEE DCBX */
+	if (subtype != I40E_CEE_DCBX_TYPE)
+		return;
+
+	typelength = ntohs(tlv->typelength);
+	tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+			I40E_LLDP_TLV_LEN_SHIFT);
+	len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
+	      sizeof(struct i40e_cee_ctrl_tlv);
+	/* Return if no CEE DCBX Feature TLVs */
+	if (tlvlen <= len)
+		return;
+
+	sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
+	while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
+		typelength = ntohs(sub_tlv->hdr.typelen);
+		sublen = (u16)((typelength &
+				I40E_LLDP_TLV_LEN_MASK) >>
+				I40E_LLDP_TLV_LEN_SHIFT);
+		subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
+				I40E_LLDP_TLV_TYPE_SHIFT);
+		switch (subtype) {
+		case I40E_CEE_SUBTYPE_PG_CFG:
+			i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+			break;
+		case I40E_CEE_SUBTYPE_PFC_CFG:
+			i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+			break;
+		case I40E_CEE_SUBTYPE_APP_PRI:
+			i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
+			break;
+		default:
+			return; /* Invalid sub-type, stop parsing */
+		}
+		feat_tlv_count++;
+		/* Move to next sub TLV */
+		sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
+						sizeof(sub_tlv->hdr.typelen) +
+						sublen);
+	}
+}
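The walk above leans on the standard LLDP TLV header encoding: a 16-bit
big-endian word carrying the length in the low 9 bits (the 0x01FF mask defined
in i40e_dcb.h below) and the type in the upper 7 bits. A minimal decode
sketch, with the type shift of 9 assumed from the standard layout rather than
quoted from the header:

	/* Sketch: split a big-endian LLDP typelen word into its fields. */
	static void lldp_decode_typelen(__be16 typelen, u8 *type, u16 *len)
	{
		u16 v = ntohs(typelen);

		*len  = v & 0x01FF;		/* length: bits 8:0 */
		*type = (v >> 9) & 0x7F;	/* type:   bits 15:9 */
	}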
+
+/**
  * i40e_parse_org_tlv
  * @tlv: Organization specific TLV
  * @dcbcfg: Local store to update ETS REC data
@@ -312,6 +496,9 @@
 	case I40E_IEEE_8021QAZ_OUI:
 		i40e_parse_ieee_tlv(tlv, dcbcfg);
 		break;
+	case I40E_CEE_DCBX_OUI:
+		i40e_parse_cee_tlv(tlv, dcbcfg);
+		break;
 	default:
 		break;
 	}
@@ -502,15 +689,18 @@
 	/* CEE PG data to ETS config */
 	dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
+	/* Note that the FW creates the oper_prio_tc nibbles reversed
+	 * from those in the CEE Priority Group sub-TLV.
+	 */
 	for (i = 0; i < 4; i++) {
 		tc = (u8)((cee_cfg->oper_prio_tc[i] &
-			 I40E_CEE_PGID_PRIO_1_MASK) >>
-			 I40E_CEE_PGID_PRIO_1_SHIFT);
-		dcbcfg->etscfg.prioritytable[i*2] =  tc;
-		tc = (u8)((cee_cfg->oper_prio_tc[i] &
 			 I40E_CEE_PGID_PRIO_0_MASK) >>
 			 I40E_CEE_PGID_PRIO_0_SHIFT);
-		dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
+		dcbcfg->etscfg.prioritytable[i * 2] = tc;
+		tc = (u8)((cee_cfg->oper_prio_tc[i] &
+			 I40E_CEE_PGID_PRIO_1_MASK) >>
+			 I40E_CEE_PGID_PRIO_1_SHIFT);
+		dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
 	}
 
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -531,37 +721,85 @@
 	dcbcfg->pfc.pfcenable = cee_cfg->oper_pfc_en;
 	dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
 
-	status = (tlv_status & I40E_AQC_CEE_APP_STATUS_MASK) >>
-		  I40E_AQC_CEE_APP_STATUS_SHIFT;
+	i = 0;
+	status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
+		  I40E_AQC_CEE_FCOE_STATUS_SHIFT;
 	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
 	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
 	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
-	/* Add APPs if Error is False and Oper/Sync is True */
+	/* Add FCoE APP if Error is False and Oper/Sync is True */
 	if (!err && sync && oper) {
-		/* CEE operating configuration supports FCoE/iSCSI/FIP only */
-		dcbcfg->numapps = I40E_CEE_OPER_MAX_APPS;
-
 		/* FCoE APP */
-		dcbcfg->app[0].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_FCOE_MASK) >>
 			 I40E_AQC_CEE_APP_FCOE_SHIFT;
-		dcbcfg->app[0].selector = I40E_APP_SEL_ETHTYPE;
-		dcbcfg->app[0].protocolid = I40E_APP_PROTOID_FCOE;
+		dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FCOE;
+		i++;
+	}
 
+	status = (tlv_status & I40E_AQC_CEE_ISCSI_STATUS_MASK) >>
+		  I40E_AQC_CEE_ISCSI_STATUS_SHIFT;
+	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+	/* Add iSCSI APP if Error is False and Oper/Sync is True */
+	if (!err && sync && oper) {
 		/* iSCSI APP */
-		dcbcfg->app[1].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_ISCSI_MASK) >>
 			 I40E_AQC_CEE_APP_ISCSI_SHIFT;
-		dcbcfg->app[1].selector = I40E_APP_SEL_TCPIP;
-		dcbcfg->app[1].protocolid = I40E_APP_PROTOID_ISCSI;
+		dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_ISCSI;
+		i++;
+	}
 
+	status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+		  I40E_AQC_CEE_FIP_STATUS_SHIFT;
+	err = (status & I40E_TLV_STATUS_ERR) ? 1 : 0;
+	sync = (status & I40E_TLV_STATUS_SYNC) ? 1 : 0;
+	oper = (status & I40E_TLV_STATUS_OPER) ? 1 : 0;
+	/* Add FIP APP if Error is False and Oper/Sync is True */
+	if (!err && sync && oper) {
 		/* FIP APP */
-		dcbcfg->app[2].priority =
+		dcbcfg->app[i].priority =
 			(app_prio & I40E_AQC_CEE_APP_FIP_MASK) >>
 			 I40E_AQC_CEE_APP_FIP_SHIFT;
-		dcbcfg->app[2].selector = I40E_APP_SEL_ETHTYPE;
-		dcbcfg->app[2].protocolid = I40E_APP_PROTOID_FIP;
+		dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE;
+		dcbcfg->app[i].protocolid = I40E_APP_PROTOID_FIP;
+		i++;
 	}
+	dcbcfg->numapps = i;
+}
+
+/**
+ * i40e_get_ieee_dcb_config
+ * @hw: pointer to the hw struct
+ *
+ * Get IEEE mode DCB configuration from the Firmware
+ **/
+static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+{
+	i40e_status ret = 0;
+
+	/* IEEE mode */
+	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+	/* Get Local DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
+				     &hw->local_dcbx_config);
+	if (ret)
+		goto out;
+
+	/* Get Remote DCB Config */
+	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
+				     I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+				     &hw->remote_dcbx_config);
+	/* Don't treat ENOENT as an error for Remote MIBs */
+	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
+		ret = 0;
+
+out:
+	return ret;
 }
 
 /**
@@ -579,7 +817,7 @@
 	/* If Firmware version < v4.33 IEEE only */
 	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
 	    (hw->aq.fw_maj_ver < 4))
-		goto ieee;
+		return i40e_get_ieee_dcb_config(hw);
 
 	/* If Firmware version == v4.33 use old CEE struct */
 	if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
@@ -608,16 +846,14 @@
 
 	/* CEE mode not enabled try querying IEEE data */
 	if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
-		goto ieee;
-	else
+		return i40e_get_ieee_dcb_config(hw);
+
+	if (ret)
 		goto out;
 
-ieee:
-	/* IEEE mode */
-	hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
-	/* Get Local DCB Config */
+	/* Get CEE DCB Desired Config */
 	ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
-				     &hw->local_dcbx_config);
+				     &hw->desired_dcbx_config);
 	if (ret)
 		goto out;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 50fc894..92d0104 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -44,6 +44,15 @@
 #define I40E_IEEE_SUBTYPE_PFC_CFG	11
 #define I40E_IEEE_SUBTYPE_APP_PRI	12
 
+#define I40E_CEE_DCBX_OUI		0x001b21
+#define I40E_CEE_DCBX_TYPE		2
+
+#define I40E_CEE_SUBTYPE_CTRL		1
+#define I40E_CEE_SUBTYPE_PG_CFG		2
+#define I40E_CEE_SUBTYPE_PFC_CFG	3
+#define I40E_CEE_SUBTYPE_APP_PRI	4
+
+#define I40E_CEE_MAX_FEAT_TYPE		3
 /* Defines for LLDP TLV header */
 #define I40E_LLDP_TLV_LEN_SHIFT		0
 #define I40E_LLDP_TLV_LEN_MASK		(0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
@@ -98,6 +107,36 @@
 	__be32 ouisubtype;
 	u8 tlvinfo[1];
 };
+
+struct i40e_cee_tlv_hdr {
+	__be16 typelen;
+	u8 operver;
+	u8 maxver;
+};
+
+struct i40e_cee_ctrl_tlv {
+	struct i40e_cee_tlv_hdr hdr;
+	__be32 seqno;
+	__be32 ackno;
+};
+
+struct i40e_cee_feat_tlv {
+	struct i40e_cee_tlv_hdr hdr;
+	u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK	0x80
+#define I40E_CEE_FEAT_TLV_WILLING_MASK	0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK	0x20
+	u8 subtype;
+	u8 tlvinfo[1];
+};
+
+struct i40e_cee_app_prio {
+	__be16 protocol;
+	u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define I40E_CEE_APP_SELECTOR_MASK	0x03
+	__be16 lower_oui;
+	u8 prio_map;
+};
 #pragma pack()
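These structures mirror on-the-wire TLV layouts, which is why they live inside
the pack pragma. As a sketch of a compile-time guard one could add (not part
of this patch; BUILD_BUG_ON must sit inside a function body), with the sizes
summed by hand under 1-byte packing:

	/* i40e_cee_tlv_hdr:  __be16 + u8 + u8          = 4 bytes
	 * i40e_cee_app_prio: __be16 + u8 + __be16 + u8 = 6 bytes
	 */
	BUILD_BUG_ON(sizeof(struct i40e_cee_tlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct i40e_cee_app_prio) != 6);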
 
 i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 1c51f73..886e667 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -236,14 +236,13 @@
 			       struct i40e_dcb_app_priority_table *app)
 {
 	int v, err;
+
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
 		if (pf->vsi[v] && pf->vsi[v]->netdev) {
 			err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
-			if (err)
-				dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
-					 __func__, pf->vsi[v]->seid,
-					 err, app->selector,
-					 app->protocolid, app->priority);
+			dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n",
+				pf->vsi[v]->seid, err, app->selector,
+				app->protocolid, app->priority);
 		}
 	}
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 508efb0..d4b7af9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -404,82 +404,82 @@
 	nstat = i40e_get_vsi_stats_struct(vsi);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-		 (long unsigned int)nstat->rx_packets,
-		 (long unsigned int)nstat->rx_bytes,
-		 (long unsigned int)nstat->rx_errors,
-		 (long unsigned int)nstat->rx_dropped);
+		 (unsigned long int)nstat->rx_packets,
+		 (unsigned long int)nstat->rx_bytes,
+		 (unsigned long int)nstat->rx_errors,
+		 (unsigned long int)nstat->rx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-		 (long unsigned int)nstat->tx_packets,
-		 (long unsigned int)nstat->tx_bytes,
-		 (long unsigned int)nstat->tx_errors,
-		 (long unsigned int)nstat->tx_dropped);
+		 (unsigned long int)nstat->tx_packets,
+		 (unsigned long int)nstat->tx_bytes,
+		 (unsigned long int)nstat->tx_errors,
+		 (unsigned long int)nstat->tx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: multicast = %lu, collisions = %lu\n",
-		 (long unsigned int)nstat->multicast,
-		 (long unsigned int)nstat->collisions);
+		 (unsigned long int)nstat->multicast,
+		 (unsigned long int)nstat->collisions);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-		 (long unsigned int)nstat->rx_length_errors,
-		 (long unsigned int)nstat->rx_over_errors,
-		 (long unsigned int)nstat->rx_crc_errors);
+		 (unsigned long int)nstat->rx_length_errors,
+		 (unsigned long int)nstat->rx_over_errors,
+		 (unsigned long int)nstat->rx_crc_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-		 (long unsigned int)nstat->rx_frame_errors,
-		 (long unsigned int)nstat->rx_fifo_errors,
-		 (long unsigned int)nstat->rx_missed_errors);
+		 (unsigned long int)nstat->rx_frame_errors,
+		 (unsigned long int)nstat->rx_fifo_errors,
+		 (unsigned long int)nstat->rx_missed_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-		 (long unsigned int)nstat->tx_aborted_errors,
-		 (long unsigned int)nstat->tx_carrier_errors,
-		 (long unsigned int)nstat->tx_fifo_errors);
+		 (unsigned long int)nstat->tx_aborted_errors,
+		 (unsigned long int)nstat->tx_carrier_errors,
+		 (unsigned long int)nstat->tx_fifo_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-		 (long unsigned int)nstat->tx_heartbeat_errors,
-		 (long unsigned int)nstat->tx_window_errors);
+		 (unsigned long int)nstat->tx_heartbeat_errors,
+		 (unsigned long int)nstat->tx_window_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
-		 (long unsigned int)nstat->rx_compressed,
-		 (long unsigned int)nstat->tx_compressed);
+		 (unsigned long int)nstat->rx_compressed,
+		 (unsigned long int)nstat->tx_compressed);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_packets,
-		 (long unsigned int)vsi->net_stats_offsets.rx_bytes,
-		 (long unsigned int)vsi->net_stats_offsets.rx_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+		 (unsigned long int)vsi->net_stats_offsets.rx_packets,
+		 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
+		 (unsigned long int)vsi->net_stats_offsets.rx_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_packets,
-		 (long unsigned int)vsi->net_stats_offsets.tx_bytes,
-		 (long unsigned int)vsi->net_stats_offsets.tx_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+		 (unsigned long int)vsi->net_stats_offsets.tx_packets,
+		 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
+		 (unsigned long int)vsi->net_stats_offsets.tx_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: multicast = %lu, collisions = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.multicast,
-		 (long unsigned int)vsi->net_stats_offsets.collisions);
+		 (unsigned long int)vsi->net_stats_offsets.multicast,
+		 (unsigned long int)vsi->net_stats_offsets.collisions);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+		 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
-		 (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+		 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
+		 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+		 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
-		 (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+		 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
+		 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
 	dev_info(&pf->pdev->dev,
 		 "    net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
-		 (long unsigned int)vsi->net_stats_offsets.rx_compressed,
-		 (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+		 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
+		 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
 	dev_info(&pf->pdev->dev,
 		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
 		 vsi->tx_restart, vsi->tx_busy,
@@ -487,6 +487,7 @@
 	rcu_read_lock();
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+
 		if (!rx_ring)
 			continue;
 
@@ -527,7 +528,7 @@
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
 			 i, rx_ring->size,
-			 (long unsigned int)rx_ring->dma);
+			 (unsigned long int)rx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
 			 i, rx_ring->vsi,
@@ -535,6 +536,7 @@
 	}
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
 		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+
 		if (!tx_ring)
 			continue;
 
@@ -573,7 +575,7 @@
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
 			 i, tx_ring->size,
-			 (long unsigned int)tx_ring->dma);
+			 (unsigned long int)tx_ring->dma);
 		dev_info(&pf->pdev->dev,
 			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
 			 i, tx_ring->vsi,
@@ -743,6 +745,7 @@
 	ring = &(hw->aq.asq);
 	for (i = 0; i < ring->count; i++) {
 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
 		dev_info(&pf->pdev->dev,
 			 "   at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
@@ -755,6 +758,7 @@
 	ring = &(hw->aq.arq);
 	for (i = 0; i < ring->count; i++) {
 		struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+
 		dev_info(&pf->pdev->dev,
 			 "   ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
 			 i, d->flags, d->opcode, d->datalen, d->retval,
@@ -949,24 +953,6 @@
 	}
 }
 
-/**
- * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR
- * @pf: the PF that would be altered
- * @flag: flag that needs enabling or disabling
- * @enable: Enable/disable FD SD/ATR
- **/
-static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
-{
-	if (enable) {
-		pf->flags |= flag;
-	} else {
-		pf->flags &= ~flag;
-		pf->auto_disable_flags |= flag;
-	}
-	dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
-}
-
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
@@ -1038,7 +1024,13 @@
 			dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
 
 	} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
-		sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+		if (cnt != 1) {
+			dev_info(&pf->pdev->dev,
+				 "del vsi: bad command string, cnt=%d\n",
+				 cnt);
+			goto command_write_done;
+		}
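sscanf() returns the number of successful conversions, so checking it (here
and in the raw-packet loop further down) rejects malformed debugfs input
instead of proceeding with a stale or uninitialized value. The same pattern in
isolation, with a hypothetical buf:

	int seid;

	if (sscanf(buf, "%i", &seid) != 1)
		return -EINVAL;	/* nothing converted: reject the input */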
 		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
 		if (!vsi) {
 			dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
@@ -1145,7 +1137,9 @@
 			goto command_write_done;
 		}
 
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		f = i40e_add_filter(vsi, ma, vlan, false, false);
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 		ret = i40e_sync_vsi_filters(vsi, true);
 		if (f && !ret)
 			dev_info(&pf->pdev->dev,
@@ -1182,7 +1176,9 @@
 			goto command_write_done;
 		}
 
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		i40e_del_filter(vsi, ma, vlan, false, false);
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 		ret = i40e_sync_vsi_filters(vsi, true);
 		if (!ret)
 			dev_info(&pf->pdev->dev,
@@ -1488,6 +1484,7 @@
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
 		u32 value;
+
 		cnt = sscanf(&cmd_buf[4], "%i", &address);
 		if (cnt != 1) {
 			dev_info(&pf->pdev->dev, "read <reg>\n");
@@ -1495,9 +1492,9 @@
 		}
 
 		/* check the range on address */
-		if (address >= I40E_MAX_REGISTER) {
-			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
-				 address);
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
 			goto command_write_done;
 		}
 
@@ -1507,6 +1504,7 @@
 
 	} else if (strncmp(cmd_buf, "write", 5) == 0) {
 		u32 address, value;
+
 		cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
 		if (cnt != 2) {
 			dev_info(&pf->pdev->dev, "write <reg> <value>\n");
@@ -1514,9 +1512,9 @@
 		}
 
 		/* check the range on address */
-		if (address >= I40E_MAX_REGISTER) {
-			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
-				 address);
+		if (address > (pf->ioremap_len - sizeof(u32))) {
+			dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
+				 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
 			goto command_write_done;
 		}
 		wr32(&pf->hw, address, value);
@@ -1528,6 +1526,7 @@
 			cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
 			if (cnt == 0) {
 				int i;
+
 				for (i = 0; i < pf->num_alloc_vsi; i++)
 					i40e_vsi_reset_stats(pf->vsi[i]);
 				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
@@ -1726,8 +1725,9 @@
 				   packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
 
 		for (i = 0; i < packet_len; i++) {
-			sscanf(&asc_packet[j], "%2hhx ",
-			       &raw_packet[i]);
+			cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
+			if (!cnt)
+				break;
 			j += 3;
 		}
 		dev_info(&pf->pdev->dev, "FD raw packet dump\n");
@@ -1745,16 +1745,13 @@
 		raw_packet = NULL;
 		kfree(asc_packet);
 		asc_packet = NULL;
-	} else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
-	} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
-		i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
 	} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
 		dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
 			 i40e_get_current_fd_count(pf));
 	} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
 		if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
 			int ret;
+
 			ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
@@ -1779,6 +1776,7 @@
 #endif /* CONFIG_I40E_DCB */
 		} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
 			int ret;
+
 			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
 						pf->hw.mac.addr,
 						I40E_ETH_P_LLDP, 0,
@@ -1807,6 +1805,7 @@
 			u16 llen, rlen;
 			int ret;
 			u8 *buff;
+
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1833,6 +1832,7 @@
 			u16 llen, rlen;
 			int ret;
 			u8 *buff;
+
 			buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
 			if (!buff)
 				goto command_write_done;
@@ -1858,6 +1858,7 @@
 			buff = NULL;
 		} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
 			int ret;
+
 			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
 								true, NULL);
 			if (ret) {
@@ -1868,6 +1869,7 @@
 			}
 		} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
 			int ret;
+
 			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
 								false, NULL);
 			if (ret) {
@@ -1969,8 +1971,6 @@
 		dev_info(&pf->pdev->dev, "  send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
 		dev_info(&pf->pdev->dev, "  add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
 		dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
-		dev_info(&pf->pdev->dev, "  fd-atr off\n");
-		dev_info(&pf->pdev->dev, "  fd-atr on\n");
 		dev_info(&pf->pdev->dev, "  fd current cnt\n");
 		dev_info(&pf->pdev->dev, "  lldp start\n");
 		dev_info(&pf->pdev->dev, "  lldp stop\n");
@@ -2105,6 +2105,7 @@
 		}
 	} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
 		int mtu;
+
 		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
 			     &vsi_seid, &mtu);
 		if (cnt != 2) {
@@ -2220,7 +2221,6 @@
 create_failed:
 	dev_info(dev, "debugfs dir/file for %s failed\n", name);
 	debugfs_remove_recursive(pf->i40e_dbg_pf);
-	return;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
new file mode 100644
index 0000000..c601ca4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
+#define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
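A sketch of how the helper macro might be used at a call site; hw->device_id
is the PCI device ID cached by the driver, and setup_40g() is a hypothetical
helper:

	if (i40e_is_40G_device(hw->device_id))
		setup_40g(hw);	/* QSFP/40G-specific handling */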
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e972b5e..3f385ff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -87,11 +87,9 @@
 	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
 	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+	I40E_VSI_STAT("tx_linearize", tx_linearize),
 };
 
-static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
-				 struct ethtool_rxnfc *cmd);
-
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  * but they are separate.  This device supports Virtualization, and
  * as such might have several netdevs supporting VMDq and FCoE going
@@ -229,10 +227,12 @@
 
 static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
 	"NPAR",
+	"LinkPolling",
+	"flow-director-atr",
+	"veb-stats",
 };
 
-#define I40E_PRIV_FLAGS_STR_LEN \
-	(sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
+#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
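ARRAY_SIZE is the idiomatic way to count string-table entries: it divides the
array size by the element size, so it stays correct even if the element type
changes, where the old sizeof()/ETH_GSTRING_LEN form silently depended on each
entry being exactly ETH_GSTRING_LEN bytes. Ignoring the kernel's extra
type-check magic, it is roughly:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))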
 
 /**
  * i40e_partition_setting_complaint - generic complaint for MFP restriction
@@ -253,7 +253,8 @@
  **/
 static void i40e_get_settings_link_up(struct i40e_hw *hw,
 				      struct ethtool_cmd *ecmd,
-				      struct net_device *netdev)
+				      struct net_device *netdev,
+				      struct i40e_pf *pf)
 {
 	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
 	u32 link_speed = hw_link_info->link_speed;
@@ -272,65 +273,49 @@
 	case I40E_PHY_TYPE_40GBASE_AOC:
 		ecmd->supported = SUPPORTED_40000baseCR4_Full;
 		break;
-	case I40E_PHY_TYPE_40GBASE_KR4:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_40000baseKR4_Full;
-		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_40000baseKR4_Full;
-		break;
 	case I40E_PHY_TYPE_40GBASE_SR4:
 		ecmd->supported = SUPPORTED_40000baseSR4_Full;
 		break;
 	case I40E_PHY_TYPE_40GBASE_LR4:
 		ecmd->supported = SUPPORTED_40000baseLR4_Full;
 		break;
-	case I40E_PHY_TYPE_20GBASE_KR2:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_20000baseKR2_Full;
-		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_20000baseKR2_Full;
-		break;
-	case I40E_PHY_TYPE_10GBASE_KX4:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_10000baseKX4_Full;
-		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_10000baseKX4_Full;
-		break;
-	case I40E_PHY_TYPE_10GBASE_KR:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_10000baseKR_Full;
-		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_10000baseKR_Full;
-		break;
 	case I40E_PHY_TYPE_10GBASE_SR:
 	case I40E_PHY_TYPE_10GBASE_LR:
 	case I40E_PHY_TYPE_1000BASE_SX:
 	case I40E_PHY_TYPE_1000BASE_LX:
-		ecmd->supported = SUPPORTED_10000baseT_Full |
-				  SUPPORTED_1000baseT_Full;
+		ecmd->supported = SUPPORTED_10000baseT_Full;
+		if (hw_link_info->module_type[2] &
+		    I40E_MODULE_TYPE_1000BASE_SX ||
+		    hw_link_info->module_type[2] &
+		    I40E_MODULE_TYPE_1000BASE_LX) {
+			ecmd->supported |= SUPPORTED_1000baseT_Full;
+			if (hw_link_info->requested_speeds &
+			    I40E_LINK_SPEED_1GB)
+				ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		}
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		break;
-	case I40E_PHY_TYPE_1000BASE_KX:
-		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_1000baseKX_Full;
-		ecmd->advertising = ADVERTISED_Autoneg |
-				    ADVERTISED_1000baseKX_Full;
 		break;
 	case I40E_PHY_TYPE_10GBASE_T:
 	case I40E_PHY_TYPE_1000BASE_T:
-	case I40E_PHY_TYPE_100BASE_TX:
 		ecmd->supported = SUPPORTED_Autoneg |
 				  SUPPORTED_10000baseT_Full |
-				  SUPPORTED_1000baseT_Full |
-				  SUPPORTED_100baseT_Full;
+				  SUPPORTED_1000baseT_Full;
 		ecmd->advertising = ADVERTISED_Autoneg;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		break;
+	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+		ecmd->supported = SUPPORTED_Autoneg |
+				  SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_Autoneg |
+				    ADVERTISED_1000baseT_Full;
+		break;
+	case I40E_PHY_TYPE_100BASE_TX:
+		ecmd->supported = SUPPORTED_Autoneg |
+				  SUPPORTED_100baseT_Full;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		break;
@@ -350,12 +335,24 @@
 		break;
 	case I40E_PHY_TYPE_SGMII:
 		ecmd->supported = SUPPORTED_Autoneg |
-				  SUPPORTED_1000baseT_Full |
-				  SUPPORTED_100baseT_Full;
+				  SUPPORTED_1000baseT_Full;
 		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
-			ecmd->advertising |= ADVERTISED_100baseT_Full;
+		if (pf->hw.mac.type == I40E_MAC_X722) {
+			ecmd->supported |= SUPPORTED_100baseT_Full;
+			if (hw_link_info->requested_speeds &
+			    I40E_LINK_SPEED_100MB)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+		}
+		break;
+	/* Backplane is set based on supported phy types in get_settings,
+	 * so don't set anything here, but don't warn either
+	 */
+	case I40E_PHY_TYPE_40GBASE_KR4:
+	case I40E_PHY_TYPE_20GBASE_KR2:
+	case I40E_PHY_TYPE_10GBASE_KR:
+	case I40E_PHY_TYPE_10GBASE_KX4:
+	case I40E_PHY_TYPE_1000BASE_KX:
 		break;
 	default:
 		/* if we got here and link is up something bad is afoot */
@@ -394,64 +391,73 @@
  * Reports link settings that can be determined when link is down
  **/
 static void i40e_get_settings_link_down(struct i40e_hw *hw,
-					struct ethtool_cmd *ecmd)
+					struct ethtool_cmd *ecmd,
+					struct i40e_pf *pf)
 {
-	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+	enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
 
 	/* link is down and the driver needs to fall back on
-	 * device ID to determine what kinds of info to display,
-	 * it's mostly a guess that may change when link is up
+	 * supported phy types to figure out what info to display
 	 */
-	switch (hw->device_id) {
-	case I40E_DEV_ID_QSFP_A:
-	case I40E_DEV_ID_QSFP_B:
-	case I40E_DEV_ID_QSFP_C:
-		/* pluggable QSFP */
-		ecmd->supported = SUPPORTED_40000baseSR4_Full |
-				  SUPPORTED_40000baseCR4_Full |
-				  SUPPORTED_40000baseLR4_Full;
-		ecmd->advertising = ADVERTISED_40000baseSR4_Full |
-				    ADVERTISED_40000baseCR4_Full |
-				    ADVERTISED_40000baseLR4_Full;
-		break;
-	case I40E_DEV_ID_KX_B:
-		/* backplane 40G */
-		ecmd->supported = SUPPORTED_40000baseKR4_Full;
-		ecmd->advertising = ADVERTISED_40000baseKR4_Full;
-		break;
-	case I40E_DEV_ID_KX_C:
-		/* backplane 10G */
-		ecmd->supported = SUPPORTED_10000baseKR_Full;
-		ecmd->advertising = ADVERTISED_10000baseKR_Full;
-		break;
-	case I40E_DEV_ID_10G_BASE_T:
-		ecmd->supported = SUPPORTED_10000baseT_Full |
-				  SUPPORTED_1000baseT_Full |
-				  SUPPORTED_100baseT_Full;
-		/* Figure out what has been requested */
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			ecmd->advertising |= ADVERTISED_10000baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+	ecmd->supported = 0x0;
+	ecmd->advertising = 0x0;
+	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+		ecmd->supported |= SUPPORTED_Autoneg |
+				   SUPPORTED_1000baseT_Full;
+		ecmd->advertising |= ADVERTISED_Autoneg |
+				     ADVERTISED_1000baseT_Full;
+		if (pf->hw.mac.type == I40E_MAC_X722) {
+			ecmd->supported |= SUPPORTED_100baseT_Full;
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
-		break;
-	case I40E_DEV_ID_20G_KR2:
-		/* backplane 20G */
-		ecmd->supported = SUPPORTED_20000baseKR2_Full;
-		ecmd->advertising = ADVERTISED_20000baseKR2_Full;
-		break;
-	default:
-		/* all the rest are 10G/1G */
-		ecmd->supported = SUPPORTED_10000baseT_Full |
-				  SUPPORTED_1000baseT_Full;
-		/* Figure out what has been requested */
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
-			ecmd->advertising |= ADVERTISED_10000baseT_Full;
-		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		break;
+		}
 	}
+	if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+	    phy_types & I40E_CAP_PHY_TYPE_XFI ||
+	    phy_types & I40E_CAP_PHY_TYPE_SFI ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+		ecmd->supported |= SUPPORTED_10000baseT_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+		ecmd->supported |= SUPPORTED_Autoneg |
+				   SUPPORTED_10000baseT_Full;
+		ecmd->advertising |= ADVERTISED_Autoneg |
+				     ADVERTISED_10000baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+	    phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+		ecmd->supported |= SUPPORTED_40000baseCR4_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+		ecmd->supported |= SUPPORTED_Autoneg |
+				  SUPPORTED_40000baseCR4_Full;
+		ecmd->advertising |= ADVERTISED_Autoneg |
+				    ADVERTISED_40000baseCR4_Full;
+	}
+	if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+	    !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+		ecmd->supported |= SUPPORTED_Autoneg |
+				   SUPPORTED_100baseT_Full;
+		ecmd->advertising |= ADVERTISED_Autoneg |
+				     ADVERTISED_100baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+		ecmd->supported |= SUPPORTED_Autoneg |
+				   SUPPORTED_1000baseT_Full;
+		ecmd->advertising |= ADVERTISED_Autoneg |
+				     ADVERTISED_1000baseT_Full;
+	}
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+		ecmd->supported |= SUPPORTED_40000baseSR4_Full;
+	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+		ecmd->supported |= SUPPORTED_40000baseLR4_Full;
 
 	/* With no link, speed and duplex are unknown */
 	ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -475,12 +481,43 @@
 	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
 
 	if (link_up)
-		i40e_get_settings_link_up(hw, ecmd, netdev);
+		i40e_get_settings_link_up(hw, ecmd, netdev, pf);
 	else
-		i40e_get_settings_link_down(hw, ecmd);
+		i40e_get_settings_link_down(hw, ecmd, pf);
 
 	/* Now set the settings that don't rely on link being up/down */
 
+	/* For backplane, supported and advertised are based solely on the
+	 * phy types the NVM reports as supported.
+	 */
+	if (hw->device_id == I40E_DEV_ID_KX_B ||
+	    hw->device_id == I40E_DEV_ID_KX_C ||
+	    hw->device_id == I40E_DEV_ID_20G_KR2 ||
+	    hw->device_id ==  I40E_DEV_ID_20G_KR2_A) {
+		ecmd->supported = SUPPORTED_Autoneg;
+		ecmd->advertising = ADVERTISED_Autoneg;
+		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+			ecmd->supported |= SUPPORTED_40000baseKR4_Full;
+			ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
+		}
+		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+			ecmd->supported |= SUPPORTED_20000baseKR2_Full;
+			ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
+		}
+		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+			ecmd->supported |= SUPPORTED_10000baseKR_Full;
+			ecmd->advertising |= ADVERTISED_10000baseKR_Full;
+		}
+		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+			ecmd->supported |= SUPPORTED_10000baseKX4_Full;
+			ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
+		}
+		if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+			ecmd->supported |= SUPPORTED_1000baseKX_Full;
+			ecmd->advertising |= ADVERTISED_1000baseKX_Full;
+		}
+	}
+
 	/* Set autoneg settings */
 	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
 			  AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -580,6 +617,14 @@
 	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
 		return -EOPNOTSUPP;
 
+	if (hw->device_id == I40E_DEV_ID_KX_B ||
+	    hw->device_id == I40E_DEV_ID_KX_C ||
+	    hw->device_id == I40E_DEV_ID_20G_KR2 ||
+	    hw->device_id == I40E_DEV_ID_20G_KR2_A) {
+		netdev_info(netdev, "Changing settings is not supported on backplane.\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* get our own copy of the bits to check against */
 	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
 	i40e_get_settings(netdev, &safe_ecmd);
@@ -616,28 +661,31 @@
 
 	/* Check autoneg */
 	if (autoneg == AUTONEG_ENABLE) {
-		/* If autoneg is not supported, return error */
-		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
-			netdev_info(netdev, "Autoneg not supported on this phy\n");
-			return -EINVAL;
-		}
 		/* If autoneg was not already enabled */
 		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+			/* If autoneg is not supported, return error */
+			if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+				netdev_info(netdev, "Autoneg not supported on this phy\n");
+				return -EINVAL;
+			}
+			/* Autoneg is allowed to change */
 			config.abilities = abilities.abilities |
 					   I40E_AQ_PHY_ENABLE_AN;
 			change = true;
 		}
 	} else {
-		/* If autoneg is supported 10GBASE_T is the only phy that
-		 * can disable it, so otherwise return error
-		 */
-		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
-		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
-			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
-			return -EINVAL;
-		}
 		/* If autoneg is currently enabled */
 		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+			/* If autoneg is supported 10GBASE_T is the only PHY
+			 * that can disable it, so otherwise return error
+			 */
+			if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+			    hw->phy.link_info.phy_type !=
+			    I40E_PHY_TYPE_10GBASE_T) {
+				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+				return -EINVAL;
+			}
+			/* Autoneg is allowed to change */
 			config.abilities = abilities.abilities &
 					   ~I40E_AQ_PHY_ENABLE_AN;
 			change = true;
@@ -664,6 +712,13 @@
 	    advertise & ADVERTISED_40000baseLR4_Full)
 		config.link_speed |= I40E_LINK_SPEED_40GB;
 
+	/* If speed didn't get set, set it to what it currently is.
+	 * This is needed because if advertise is 0 (as it is when autoneg
+	 * is disabled) then speed won't get set.
+	 */
+	if (!config.link_speed)
+		config.link_speed = abilities.link_speed;
+
 	if (change || (abilities.link_speed != config.link_speed)) {
 		/* copy over the rest of the abilities */
 		config.phy_type = abilities.phy_type;
@@ -680,7 +735,7 @@
 			/* Tell the OS link is going down, the link will go
 			 * back up when fw says it is ready asynchronously
 			 */
-			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
+			i40e_print_link_message(vsi, false);
 			netif_carrier_off(netdev);
 			netif_tx_stop_all_queues(netdev);
 		}
@@ -694,11 +749,11 @@
 			return -EAGAIN;
 		}
 
-		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+		status = i40e_update_link_info(hw);
 		if (status)
-			netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
-				    i40e_stat_str(hw, status),
-				    i40e_aq_str(hw, hw->aq.asq_last_status));
+			netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n",
+				   i40e_stat_str(hw, status),
+				   i40e_aq_str(hw, hw->aq.asq_last_status));
 
 	} else {
 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -824,7 +879,7 @@
 	/* Tell the OS link is going down, the link will go back up when fw
 	 * says it is ready asynchronously
 	 */
-	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
+	i40e_print_link_message(vsi, false);
 	netif_carrier_off(netdev);
 	netif_tx_stop_all_queues(netdev);
 
@@ -948,9 +1003,7 @@
 
 		cmd = (struct i40e_nvm_access *)eeprom;
 		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-		if (ret_val &&
-		    ((hw->aq.asq_last_status != I40E_AQ_RC_EACCES) ||
-		     (hw->debug_mask & I40E_DEBUG_NVM)))
+		if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
 			dev_info(&pf->pdev->dev,
 				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
 				 ret_val, hw->aq.asq_last_status, errno,
@@ -1054,10 +1107,7 @@
 
 	cmd = (struct i40e_nvm_access *)eeprom;
 	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-	if (ret_val &&
-	    ((hw->aq.asq_last_status != I40E_AQ_RC_EPERM &&
-	      hw->aq.asq_last_status != I40E_AQ_RC_EBUSY) ||
-	     (hw->debug_mask & I40E_DEBUG_NVM)))
+	if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
 		dev_info(&pf->pdev->dev,
 			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
 			 ret_val, hw->aq.asq_last_status, errno,
@@ -1077,11 +1127,10 @@
 	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, i40e_driver_version_str,
 		sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+	strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
 		sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
 }
 
 static void i40e_get_ringparam(struct net_device *netdev,
@@ -1166,6 +1215,11 @@
 			/* clone ring and setup updated count */
 			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
+			/* the desc and bi pointers will be reallocated in the
+			 * setup call
+			 */
+			tx_rings[i].desc = NULL;
+			tx_rings[i].rx_bi = NULL;
 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
 			if (err) {
 				while (i) {
@@ -1196,6 +1250,11 @@
 			/* clone ring and setup updated count */
 			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
+			/* the desc and bi pointers will be reallocated in the
+			 * setup call
+			 */
+			rx_rings[i].desc = NULL;
+			rx_rings[i].rx_bi = NULL;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err) {
 				while (i) {
@@ -1263,7 +1322,8 @@
 		if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) {
 			int len = I40E_PF_STATS_LEN(netdev);
 
-			if (pf->lan_veb != I40E_NO_VEB)
+			if ((pf->lan_veb != I40E_NO_VEB) &&
+			    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED))
 				len += I40E_VEB_STATS_TOTAL;
 			return len;
 		} else {
@@ -1336,14 +1396,22 @@
 	if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
 		return;
 
-	if (pf->lan_veb != I40E_NO_VEB) {
+	if ((pf->lan_veb != I40E_NO_VEB) &&
+	    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
 		struct i40e_veb *veb = pf->veb[pf->lan_veb];
+
 		for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
 			p = (char *)veb;
 			p += i40e_gstrings_veb_stats[j].stat_offset;
 			data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
 				     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 		}
+		for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) {
+			data[i++] = veb->tc_stats.tc_tx_packets[j];
+			data[i++] = veb->tc_stats.tc_tx_bytes[j];
+			data[i++] = veb->tc_stats.tc_rx_packets[j];
+			data[i++] = veb->tc_stats.tc_rx_bytes[j];
+		}
 	}
 	for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
 		p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -1409,7 +1477,8 @@
 		if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
 			return;
 
-		if (pf->lan_veb != I40E_NO_VEB) {
+		if ((pf->lan_veb != I40E_NO_VEB) &&
+		    (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
 			for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
 				snprintf(p, ETH_GSTRING_LEN, "veb.%s",
 					i40e_gstrings_veb_stats[i].stat_string);
@@ -1504,9 +1573,18 @@
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
+	i40e_status status;
+	bool link_up = false;
 
 	netif_info(pf, hw, netdev, "link test\n");
-	if (i40e_get_link_status(&pf->hw))
+	status = i40e_get_link_status(&pf->hw, &link_up);
+	if (status) {
+		netif_err(pf, drv, netdev, "link query timed out, please retry test\n");
+		*data = 1;
+		return *data;
+	}
+
+	if (link_up)
 		*data = 0;
 	else
 		*data = 1;
@@ -1575,7 +1653,7 @@
 	int i;
 
 	for (i = 0; i < pf->num_alloc_vfs; i++)
-		if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+		if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
 			return true;
 	return false;
 }
@@ -1782,6 +1860,14 @@
 
 	ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
 	ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+	/* we use the _usecs_high to store/set the interrupt rate limit
+	 * that the hardware supports. This almost but not quite fits
+	 * the original intent of the ethtool variable, in that
+	 * rx_coalesce_usecs_high limits the total interrupts per
+	 * second from both tx and rx sources.
+	 */
+	ec->rx_coalesce_usecs_high = vsi->int_rate_limit;
+	ec->tx_coalesce_usecs_high = vsi->int_rate_limit;
 
 	return 0;
 }
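With int_rate_limit reported through rx_coalesce_usecs_high, the limit is
driven by the standard coalescing interface; assuming an interface named eth0,
something like

	ethtool -C eth0 rx-usecs-high 20

programs the interrupt rate limit, subject to the 0-235 range check added in
i40e_set_coalesce below.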
@@ -1800,6 +1886,17 @@
 	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
 		vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
+	/* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */
+	if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) {
+		netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n");
+		return -EINVAL;
+	}
+
+	if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+		netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+		return -EINVAL;
+	}
+
 	vector = vsi->base_vector;
 	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
 	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
@@ -1813,6 +1910,8 @@
 		return -EINVAL;
 	}
 
+	vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+
 	if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
 	    (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
 		vsi->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -1837,11 +1936,14 @@
 		vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+
 		q_vector = vsi->q_vectors[i];
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
 		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
 		wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+		wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
 		i40e_flush(hw);
 	}
 
@@ -2604,10 +2706,51 @@
 
 	ret_flags |= pf->hw.func_caps.npar_enable ?
 		I40E_PRIV_FLAGS_NPAR_FLAG : 0;
+	ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
+		I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
+	ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
+		I40E_PRIV_FLAGS_FD_ATR : 0;
+	ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
+		I40E_PRIV_FLAGS_VEB_STATS : 0;
 
 	return ret_flags;
 }
 
+/**
+ * i40e_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct i40e_netdev_priv *np = netdev_priv(dev);
+	struct i40e_vsi *vsi = np->vsi;
+	struct i40e_pf *pf = vsi->back;
+
+	if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
+		pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
+
+	/* allow the user to control the state of the Flow
+	 * Director ATR (Application Targeted Routing) feature
+	 * of the driver
+	 */
+	if (flags & I40E_PRIV_FLAGS_FD_ATR) {
+		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+	} else {
+		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+		pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+	}
+
+	if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+		pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
+	else
+		pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
+	return 0;
+}
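These flags are then driven through ethtool's private-flags interface, with
names matching i40e_priv_flags_strings above; assuming an interface named
eth0:

	ethtool --show-priv-flags eth0
	ethtool --set-priv-flags eth0 flow-director-atr off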
+
 static const struct ethtool_ops i40e_ethtool_ops = {
 	.get_settings		= i40e_get_settings,
 	.set_settings		= i40e_set_settings,
@@ -2644,6 +2787,7 @@
 	.set_channels		= i40e_set_channels,
 	.get_ts_info		= i40e_get_ts_info,
 	.get_priv_flags		= i40e_get_priv_flags,
+	.set_priv_flags		= i40e_set_priv_flags,
 };
 
 void i40e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 5ea75dd..fe5d9bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -272,10 +272,8 @@
 /**
  * i40e_fcoe_sw_init - sets up the HW for FCoE
  * @pf: pointer to PF
- *
- * Returns 0 if FCoE is supported otherwise the error code
  **/
-int i40e_init_pf_fcoe(struct i40e_pf *pf)
+void i40e_init_pf_fcoe(struct i40e_pf *pf)
 {
 	struct i40e_hw *hw = &pf->hw;
 	u32 val;
@@ -286,14 +284,14 @@
 	pf->fcoe_hmc_filt_num = 0;
 
 	if (!pf->hw.func_caps.fcoe) {
-		dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
-		return 0;
+		dev_dbg(&pf->pdev->dev, "FCoE capability is disabled\n");
+		return;
 	}
 
 	if (!pf->hw.func_caps.dcb) {
 		dev_warn(&pf->pdev->dev,
 			 "Hardware is not DCB capable, not enabling FCoE.\n");
-		return 0;
+		return;
 	}
 
 	/* enable FCoE hash filter */
@@ -326,7 +324,6 @@
 	wr32(hw, I40E_GLFCOE_RCTL, val);
 
 	dev_info(&pf->pdev->dev, "FCoE is supported.\n");
-	return 0;
 }
 
 /**
@@ -1519,10 +1516,12 @@
 	 * same PCI function.
 	 */
 	netdev->dev_port = 1;
+	spin_lock_bh(&vsi->mac_filter_list_lock);
 	i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
 	i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
 	i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
 	i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* use san mac */
 	ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index fa371a2..79ae7be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -431,9 +431,8 @@
 			pd_idx1 = max(pd_idx,
 				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
 			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
-			for (i = pd_idx1; i < pd_lmt1; i++) {
+			for (i = pd_idx1; i < pd_lmt1; i++)
 				i40e_remove_pd_bp(hw, info->hmc_info, i);
-			}
 			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
 			break;
 		case I40E_SD_TYPE_DIRECT:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 52e58f30..b825f97 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 21
+#define DRV_VERSION_BUILD 46
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -75,10 +75,12 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
 	/* required last entry */
 	{0, }
 };
@@ -213,10 +216,10 @@
 			ret = i;
 			pile->search_hint = i + j;
 			break;
-		} else {
-			/* not enough, so skip over it and continue looking */
-			i += j;
 		}
+
+		/* not enough, so skip over it and continue looking */
+		i += j;
 	}
 
 	return ret;
@@ -474,6 +477,7 @@
 	stats->tx_errors	= vsi_stats->tx_errors;
 	stats->tx_dropped	= vsi_stats->tx_dropped;
 	stats->rx_errors	= vsi_stats->rx_errors;
+	stats->rx_dropped	= vsi_stats->rx_dropped;
 	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
 	stats->rx_length_errors	= vsi_stats->rx_length_errors;
 
@@ -499,11 +503,11 @@
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
 	if (vsi->rx_rings && vsi->rx_rings[0]) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			memset(&vsi->rx_rings[i]->stats, 0 ,
+			memset(&vsi->rx_rings[i]->stats, 0,
 			       sizeof(vsi->rx_rings[i]->stats));
-			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+			memset(&vsi->rx_rings[i]->rx_stats, 0,
 			       sizeof(vsi->rx_rings[i]->rx_stats));
-			memset(&vsi->tx_rings[i]->stats, 0 ,
+			memset(&vsi->tx_rings[i]->stats, 0,
 			       sizeof(vsi->tx_rings[i]->stats));
 			memset(&vsi->tx_rings[i]->tx_stats, 0,
 			       sizeof(vsi->tx_rings[i]->tx_stats));
@@ -839,6 +843,7 @@
 
 	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
 		u64 prio_xoff = nsd->priority_xoff_rx[i];
+
 		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
 				   pf->stat_offsets_loaded,
 				   &osd->priority_xoff_rx[i],
@@ -875,6 +880,7 @@
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
 	unsigned int start;
+	u64 tx_linearize;
 	u64 rx_p, rx_b;
 	u64 tx_p, tx_b;
 	u16 q;
@@ -893,7 +899,7 @@
 	 */
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
-	tx_restart = tx_busy = 0;
+	tx_restart = tx_busy = tx_linearize = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rcu_read_lock();
@@ -910,6 +916,7 @@
 		tx_p += packets;
 		tx_restart += p->tx_stats.restart_queue;
 		tx_busy += p->tx_stats.tx_busy;
+		tx_linearize += p->tx_stats.tx_linearize;
 
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
@@ -926,6 +933,7 @@
 	rcu_read_unlock();
 	vsi->tx_restart = tx_restart;
 	vsi->tx_busy = tx_busy;
+	vsi->tx_linearize = tx_linearize;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
 
@@ -1269,7 +1277,7 @@
 	 * so we have to go through all the list in order to make sure
 	 */
 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
-		if (f->vlan >= 0)
+		if (f->vlan >= 0 || vsi->info.pvid)
 			return true;
 	}
 
@@ -1347,6 +1355,9 @@
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
  *
  * Returns ptr to the filter object or NULL when no memory available.
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
  **/
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
 					u8 *macaddr, s16 vlan,
@@ -1405,6 +1416,9 @@
  * @vlan: the vlan
  * @is_vf: make sure it's a VF filter, else doesn't matter
  * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * NOTE: This function is expected to be called with mac_filter_list_lock
+ * being held.
  **/
 void i40e_del_filter(struct i40e_vsi *vsi,
 		     u8 *macaddr, s16 vlan,
@@ -1432,6 +1446,7 @@
 	} else {
 		/* make sure we don't remove a filter in use by VF or netdev */
 		int min_f = 0;
+
 		min_f += (f->is_vf ? 1 : 0);
 		min_f += (f->is_netdev ? 1 : 0);
 
@@ -1490,6 +1505,7 @@
 
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
+
 		ret = i40e_aq_mac_address_write(&vsi->back->hw,
 						I40E_AQC_WRITE_TYPE_LAA_WOL,
 						addr->sa_data, NULL);
@@ -1509,8 +1525,10 @@
 		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
 		i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
 	} else {
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
 				false, false);
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
 
 	if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
@@ -1521,10 +1539,12 @@
 		element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
 		i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
 	} else {
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
 				    false, false);
 		if (f)
 			f->is_laa = true;
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
 
 	i40e_sync_vsi_filters(vsi, false);
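
The locking rule introduced above is: mutate mac_filter_list only while holding
mac_filter_list_lock, and drop the lock before calling i40e_sync_vsi_filters(),
which acquires it internally. A minimal userspace sketch of the same
discipline, with a pthread mutex standing in for the BH spinlock (all names
hypothetical):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct filter {
        struct filter *next;
        unsigned char mac[6];
    };

    static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct filter *filter_list;

    /* Takes filter_lock itself, so callers must not hold it. */
    void sync_filters(void)
    {
        pthread_mutex_lock(&filter_lock);
        /* ... push filter_list to the device here ... */
        pthread_mutex_unlock(&filter_lock);
    }

    int set_mac(const unsigned char *mac)
    {
        struct filter *f = calloc(1, sizeof(*f));

        if (!f)
            return -1;
        memcpy(f->mac, mac, sizeof(f->mac));

        pthread_mutex_lock(&filter_lock);   /* mutate the list under the lock */
        f->next = filter_list;
        filter_list = f;
        pthread_mutex_unlock(&filter_lock); /* release before syncing */

        sync_filters();                     /* re-acquires the lock itself */
        return 0;
    }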
@@ -1697,6 +1717,8 @@
 	struct netdev_hw_addr *mca;
 	struct netdev_hw_addr *ha;
 
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+
 	/* add addr if not already in the filter list */
 	netdev_for_each_uc_addr(uca, netdev) {
 		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
@@ -1722,37 +1744,29 @@
 
 	/* remove filter if not in netdev list */
 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-		bool found = false;
 
 		if (!f->is_netdev)
 			continue;
 
-		if (is_multicast_ether_addr(f->macaddr)) {
-			netdev_for_each_mc_addr(mca, netdev) {
-				if (ether_addr_equal(mca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		} else {
-			netdev_for_each_uc_addr(uca, netdev) {
-				if (ether_addr_equal(uca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
+		netdev_for_each_mc_addr(mca, netdev)
+			if (ether_addr_equal(mca->addr, f->macaddr))
+				goto bottom_of_search_loop;
 
-			for_each_dev_addr(netdev, ha) {
-				if (ether_addr_equal(ha->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		}
-		if (!found)
-			i40e_del_filter(
-			   vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+		netdev_for_each_uc_addr(uca, netdev)
+			if (ether_addr_equal(uca->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		for_each_dev_addr(netdev, ha)
+			if (ether_addr_equal(ha->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+
+bottom_of_search_loop:
+		continue;
 	}
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* check for other flag changes */
 	if (vsi->current_netdev_flags != vsi->netdev->flags) {
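
The rewrite above drops the bool found flag in favour of a goto to a label at
the bottom of the outer loop, which acts as a "continue" reachable from any of
the three inner loops. The shape, reduced to standard C over hypothetical data:

    #include <stdio.h>

    /* Report entries of "kept" that appear in neither source array. */
    void prune(const int *kept, int n, const int *a, int alen,
               const int *b, int blen)
    {
        int i, j;

        for (i = 0; i < n; i++) {
            for (j = 0; j < alen; j++)
                if (kept[i] == a[j])
                    goto bottom_of_search_loop;

            for (j = 0; j < blen; j++)
                if (kept[i] == b[j])
                    goto bottom_of_search_loop;

            /* found in neither source, so it would be deleted */
            printf("dropping %d\n", kept[i]);

    bottom_of_search_loop:
            continue; /* a label must be attached to a statement */
        }
    }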
@@ -1762,6 +1776,79 @@
 }
 
 /**
+ * i40e_mac_filter_entry_clone - Clones a MAC filter entry
+ * @src: source MAC filter entry to be cloned
+ *
+ * Returns the pointer to newly cloned MAC filter entry or NULL
+ * in case of error
+ **/
+static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
+					struct i40e_mac_filter *src)
+{
+	struct i40e_mac_filter *f;
+
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
+	if (!f)
+		return NULL;
+	*f = *src;
+
+	INIT_LIST_HEAD(&f->list);
+
+	return f;
+}
+
+/**
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from the list were slated to be removed from the device.
+ **/
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+					 struct list_head *from)
+{
+	struct i40e_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, from, list) {
+		f->changed = true;
+		/* Move the element back into MAC filter list */
+		list_move_tail(&f->list, &vsi->mac_filter_list);
+	}
+}
+
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: pointer to vsi struct
+ *
+ * MAC filter entries from the list were slated to be added to the device.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+{
+	struct i40e_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+		if (!f->changed && f->counter)
+			f->changed = true;
+	}
+}
+
+/**
+ * i40e_cleanup_add_list - Deletes the elements from the add list and
+ *			releases memory
+ * @add_list: Pointer to list which contains MAC filter entries
+ **/
+static void i40e_cleanup_add_list(struct list_head *add_list)
+{
+	struct i40e_mac_filter *f, *ftmp;
+
+	list_for_each_entry_safe(f, ftmp, add_list, list) {
+		list_del(&f->list);
+		kfree(f);
+	}
+}
+
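
The helpers above implement a two-phase update: entries are moved (or cloned)
onto temporary lists while the spinlock is held, and if any clone fails the
originals are moved back and the clones freed. A compact userspace model of the
rollback half, using a singly linked list (all names hypothetical):

    #include <stdlib.h>

    struct node {
        struct node *next;
        int payload;
    };

    /* Move every node from *src back onto *dst (order is not preserved). */
    void move_all(struct node **src, struct node **dst)
    {
        while (*src) {
            struct node *n = *src;

            *src = n->next;
            n->next = *dst;
            *dst = n;
        }
    }

    struct node *clone_node(const struct node *src)
    {
        struct node *n = malloc(sizeof(*n));

        if (n)
            *n = *src; /* struct copy, as in the clone helper above */
        return n;
    }

    /* Undo a partial pass: restore entries slated for delete, free clones. */
    void rollback(struct node **tmp_del, struct node **tmp_add,
                  struct node **main_list)
    {
        struct node *n;

        move_all(tmp_del, main_list);
        while ((n = *tmp_add)) {
            *tmp_add = n->next;
            free(n);
        }
    }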
+/**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
  * @grab_rtnl: whether RTNL needs to be grabbed
@@ -1772,11 +1859,13 @@
  **/
 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
 {
-	struct i40e_mac_filter *f, *ftmp;
+	struct list_head tmp_del_list, tmp_add_list;
+	struct i40e_mac_filter *f, *ftmp, *fclone;
 	bool promisc_forced_on = false;
 	bool add_happened = false;
 	int filter_list_len = 0;
 	u32 changed_flags = 0;
+	bool err_cond = false;
 	i40e_status ret = 0;
 	struct i40e_pf *pf;
 	int num_add = 0;
@@ -1797,17 +1886,13 @@
 		vsi->current_netdev_flags = vsi->netdev->flags;
 	}
 
+	INIT_LIST_HEAD(&tmp_del_list);
+	INIT_LIST_HEAD(&tmp_add_list);
+
 	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
 		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
 
-		filter_list_len = pf->hw.aq.asq_buf_size /
-			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
-		del_list = kcalloc(filter_list_len,
-			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
-			    GFP_KERNEL);
-		if (!del_list)
-			return -ENOMEM;
-
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
 			if (!f->changed)
 				continue;
@@ -1815,6 +1900,58 @@
 			if (f->counter != 0)
 				continue;
 			f->changed = false;
+
+			/* Move the element into temporary del_list */
+			list_move_tail(&f->list, &tmp_del_list);
+		}
+
+		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+			if (!f->changed)
+				continue;
+
+			if (f->counter == 0)
+				continue;
+			f->changed = false;
+
+			/* Clone MAC filter entry and add into temporary list */
+			fclone = i40e_mac_filter_entry_clone(f);
+			if (!fclone) {
+				err_cond = true;
+				break;
+			}
+			list_add_tail(&fclone->list, &tmp_add_list);
+		}
+
+		/* if failed to clone MAC filter entry - undo */
+		if (err_cond) {
+			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+			i40e_undo_add_filter_entries(vsi);
+		}
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+		if (err_cond)
+			i40e_cleanup_add_list(&tmp_add_list);
+	}
+
+	/* Now process 'del_list' outside the lock */
+	if (!list_empty(&tmp_del_list)) {
+		filter_list_len = pf->hw.aq.asq_buf_size /
+			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
+		del_list = kcalloc(filter_list_len,
+			    sizeof(struct i40e_aqc_remove_macvlan_element_data),
+			    GFP_KERNEL);
+		if (!del_list) {
+			i40e_cleanup_add_list(&tmp_add_list);
+
+			/* Undo VSI's MAC filter entry element updates */
+			spin_lock_bh(&vsi->mac_filter_list_lock);
+			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+			i40e_undo_add_filter_entries(vsi);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
+			return -ENOMEM;
+		}
+
+		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
 			cmd_flags = 0;
 
 			/* add to delete list */
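
The delete path that follows packs filters into a kcalloc()'d array sized to
fit the admin-queue buffer, firing i40e_aq_remove_macvlan() whenever the array
fills and once more for the remainder. The batching skeleton in plain C, with a
hypothetical flush target:

    #include <stdio.h>

    #define BATCH_LEN 4 /* stands in for asq_buf_size / element size */

    void flush(const int *batch, int n)
    {
        printf("flushing %d entries\n", n); /* the AQ call would go here */
    }

    void process(const int *items, int count)
    {
        int batch[BATCH_LEN];
        int n = 0, i;

        for (i = 0; i < count; i++) {
            batch[n++] = items[i];
            if (n == BATCH_LEN) { /* flush a full buffer */
                flush(batch, n);
                n = 0;
            }
        }
        if (n) /* flush whatever is left over */
            flush(batch, n);
    }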
@@ -1827,10 +1964,6 @@
 			del_list[num_del].flags = cmd_flags;
 			num_del++;
 
-			/* unlink from filter list */
-			list_del(&f->list);
-			kfree(f);
-
 			/* flush a full buffer */
 			if (num_del == filter_list_len) {
 				ret = i40e_aq_remove_macvlan(&pf->hw,
@@ -1841,12 +1974,18 @@
 				memset(del_list, 0, sizeof(*del_list));
 
 				if (ret && aq_err != I40E_AQ_RC_ENOENT)
-					dev_info(&pf->pdev->dev,
-						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
-						 i40e_stat_str(&pf->hw, ret),
-						 i40e_aq_str(&pf->hw, aq_err));
+					dev_err(&pf->pdev->dev,
+						"ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+						i40e_stat_str(&pf->hw, ret),
+						i40e_aq_str(&pf->hw, aq_err));
 			}
+			/* Release memory for MAC filter entries which were
+			 * synced up with HW.
+			 */
+			list_del(&f->list);
+			kfree(f);
 		}
+
 		if (num_del) {
 			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
 						     del_list, num_del, NULL);
@@ -1862,6 +2001,9 @@
 
 		kfree(del_list);
 		del_list = NULL;
+	}
+
+	if (!list_empty(&tmp_add_list)) {
 
 		/* do all the adds now */
 		filter_list_len = pf->hw.aq.asq_buf_size /
@@ -1869,16 +2011,19 @@
 		add_list = kcalloc(filter_list_len,
 			       sizeof(struct i40e_aqc_add_macvlan_element_data),
 			       GFP_KERNEL);
-		if (!add_list)
+		if (!add_list) {
+			/* Purge element from temporary lists */
+			i40e_cleanup_add_list(&tmp_add_list);
+
+			/* Undo add filter entries from VSI MAC filter list */
+			spin_lock_bh(&vsi->mac_filter_list_lock);
+			i40e_undo_add_filter_entries(vsi);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			return -ENOMEM;
+		}
 
-		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
-			if (!f->changed)
-				continue;
+		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
 
-			if (f->counter == 0)
-				continue;
-			f->changed = false;
 			add_happened = true;
 			cmd_flags = 0;
 
@@ -1905,7 +2050,13 @@
 					break;
 				memset(add_list, 0, sizeof(*add_list));
 			}
+			/* Entries from tmp_add_list were cloned from MAC
+			 * filter list, hence clean those cloned entries
+			 */
+			list_del(&f->list);
+			kfree(f);
 		}
+
 		if (num_add) {
 			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
 						  add_list, num_add, NULL);
@@ -1934,6 +2085,7 @@
 	/* check for changes in promiscuous modes */
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
+
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
 		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
 							    vsi->seid,
@@ -1948,6 +2100,7 @@
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
+
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
@@ -2155,6 +2308,9 @@
 	is_vf = (vsi->type == I40E_VSI_SRIOV);
 	is_netdev = !!(vsi->netdev);
 
+	/* Locked once because all functions invoked below iterate the list */
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+
 	if (is_netdev) {
 		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
 					is_vf, is_netdev);
@@ -2162,6 +2318,7 @@
 			dev_info(&vsi->back->pdev->dev,
 				 "Could not add vlan filter %d for %pM\n",
 				 vid, vsi->netdev->dev_addr);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			return -ENOMEM;
 		}
 	}
@@ -2172,6 +2329,7 @@
 			dev_info(&vsi->back->pdev->dev,
 				 "Could not add vlan filter %d for %pM\n",
 				 vid, f->macaddr);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			return -ENOMEM;
 		}
 	}
@@ -2193,6 +2351,7 @@
 				dev_info(&vsi->back->pdev->dev,
 					 "Could not add filter 0 for %pM\n",
 					 vsi->netdev->dev_addr);
+				spin_unlock_bh(&vsi->mac_filter_list_lock);
 				return -ENOMEM;
 			}
 		}
@@ -2201,22 +2360,28 @@
 	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
 	if (vid > 0 && !vsi->info.pvid) {
 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
-			if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-					     is_vf, is_netdev)) {
-				i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-						is_vf, is_netdev);
-				add_f = i40e_add_filter(vsi, f->macaddr,
-							0, is_vf, is_netdev);
-				if (!add_f) {
-					dev_info(&vsi->back->pdev->dev,
-						 "Could not add filter 0 for %pM\n",
-						 f->macaddr);
-					return -ENOMEM;
-				}
+			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+					      is_vf, is_netdev))
+				continue;
+			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+					is_vf, is_netdev);
+			add_f = i40e_add_filter(vsi, f->macaddr,
+						0, is_vf, is_netdev);
+			if (!add_f) {
+				dev_info(&vsi->back->pdev->dev,
+					 "Could not add filter 0 for %pM\n",
+					f->macaddr);
+				spin_unlock_bh(&vsi->mac_filter_list_lock);
+				return -ENOMEM;
 			}
 		}
 	}
 
+	/* Make sure to release before sync_vsi_filter because that
+	 * function will lock/unlock as necessary
+	 */
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
 		return 0;
@@ -2241,6 +2406,9 @@
 	is_vf = (vsi->type == I40E_VSI_SRIOV);
 	is_netdev = !!(netdev);
 
+	/* Locked once because all functions invoked below iterate the list */
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+
 	if (is_netdev)
 		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
 
@@ -2271,6 +2439,7 @@
 			dev_info(&vsi->back->pdev->dev,
 				 "Could not add filter %d for %pM\n",
 				 I40E_VLAN_ANY, netdev->dev_addr);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			return -ENOMEM;
 		}
 	}
@@ -2279,16 +2448,22 @@
 		list_for_each_entry(f, &vsi->mac_filter_list, list) {
 			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
 			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
-					    is_vf, is_netdev);
+						is_vf, is_netdev);
 			if (!add_f) {
 				dev_info(&vsi->back->pdev->dev,
 					 "Could not add filter %d for %pM\n",
 					 I40E_VLAN_ANY, f->macaddr);
+				spin_unlock_bh(&vsi->mac_filter_list_lock);
 				return -ENOMEM;
 			}
 		}
 	}
 
+	/* Make sure to release before sync_vsi_filter because that
+	 * function will lock/unlock as necessary
+	 */
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
 	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
 	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
 		return 0;
@@ -2688,7 +2863,8 @@
 		rx_ctx.lrxqthresh = 2;
 	rx_ctx.crcstrip = 1;
 	rx_ctx.l2tsel = 1;
-	rx_ctx.showiv = 1;
+	/* this controls whether VLAN is stripped from inner headers */
+	rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
 	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
@@ -2897,11 +3073,9 @@
 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	struct i40e_q_vector *q_vector;
 	struct i40e_hw *hw = &pf->hw;
 	u16 vector;
 	int i, q;
-	u32 val;
 	u32 qp;
 
 	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
@@ -2911,7 +3085,9 @@
 	qp = vsi->base_queue;
 	vector = vsi->base_vector;
 	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-		q_vector = vsi->q_vectors[i];
+		struct i40e_q_vector *q_vector = vsi->q_vectors[i];
+
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
 		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 		q_vector->rx.latency_range = I40E_LOW_LATENCY;
 		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2920,10 +3096,14 @@
 		q_vector->tx.latency_range = I40E_LOW_LATENCY;
 		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
 		     q_vector->tx.itr);
+		wr32(hw, I40E_PFINT_RATEN(vector - 1),
+		     INTRL_USEC_TO_REG(vsi->int_rate_limit));
 
 		/* Linked list for the queuepairs assigned to this vector */
 		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
 		for (q = 0; q < q_vector->num_ringpairs; q++) {
+			u32 val;
+
 			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
 			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
 			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@ -3003,6 +3183,7 @@
 	u32 val;
 
 	/* set the ITR configuration */
+	q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
 	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
@@ -3061,24 +3242,6 @@
 }
 
 /**
- * i40e_irq_dynamic_enable - Enable default interrupt generation settings
- * @vsi: pointer to a vsi
- * @vector: enable a particular Hw Interrupt vector
- **/
-void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
-{
-	struct i40e_pf *pf = vsi->back;
-	struct i40e_hw *hw = &pf->hw;
-	u32 val;
-
-	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-	wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-	/* skip the flush */
-}
-
-/**
  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
  * @vsi: pointer to a vsi
  * @vector: disable a particular Hw Interrupt vector
@@ -3106,7 +3269,7 @@
 	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
-	napi_schedule(&q_vector->napi);
+	napi_schedule_irqoff(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
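
napi_schedule_irqoff() is safe here because a hardirq handler runs with
interrupts already disabled, so the local_irq_save()/restore() pair inside
napi_schedule() is pure overhead. The underlying convention is a pair of entry
points, one that establishes the required context and a cheaper one that
assumes it; a sketch with a mutex standing in for the disabled-interrupts
context (names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int poll_pending;

    /* Cheap variant: the caller must already hold ctx_lock. */
    void schedule_poll_locked(void)
    {
        poll_pending = 1;
    }

    /* General variant: establishes the context itself. */
    void schedule_poll(void)
    {
        pthread_mutex_lock(&ctx_lock);
        schedule_poll_locked();
        pthread_mutex_unlock(&ctx_lock);
    }

    /* A caller that is invoked with ctx_lock already held, mirroring a
     * hardirq handler that runs with interrupts off, uses the _locked
     * variant directly instead of paying for a second acquire/release.
     */
    void handler_with_ctx_held(void)
    {
        schedule_poll_locked();
    }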
@@ -3151,8 +3314,7 @@
 				  q_vector);
 		if (err) {
 			dev_info(&pf->pdev->dev,
-				 "%s: request_irq failed, error: %d\n",
-				 __func__, err);
+				 "MSIX request_irq failed, error: %d\n", err);
 			goto free_queue_irqs;
 		}
 		/* assign the mask for this irq */
@@ -3217,8 +3379,7 @@
 	int i;
 
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-		for (i = vsi->base_vector;
-		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_irq_dynamic_enable(vsi, i);
 	} else {
 		i40e_irq_dynamic_enable_icr0(pf);
@@ -3277,9 +3438,12 @@
 
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 
 		/* temporarily disable queue cause for NAPI processing */
 		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+
 		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 		wr32(hw, I40E_QINT_RQCTL(0), qval);
 
@@ -3288,7 +3452,7 @@
 		wr32(hw, I40E_QINT_TQCTL(0), qval);
 
 		if (!test_bit(__I40E_DOWN, &pf->state))
-			napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
+			napi_schedule_irqoff(&q_vector->napi);
 	}
 
 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -3449,10 +3613,9 @@
 	i += tx_ring->count;
 	tx_ring->next_to_clean = i;
 
-	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-		i40e_irq_dynamic_enable(vsi,
-				tx_ring->q_vector->v_idx + vsi->base_vector);
-	}
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
+		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
+
 	return budget > 0;
 }
 
@@ -3590,14 +3753,12 @@
 	if (test_bit(__I40E_DOWN, &vsi->state))
 		return;
 
-	pf->flags |= I40E_FLAG_IN_NETPOLL;
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 		for (i = 0; i < vsi->num_q_vectors; i++)
 			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
 	} else {
 		i40e_intr(pf->pdev->irq, netdev);
 	}
-	pf->flags &= ~I40E_FLAG_IN_NETPOLL;
 }
 #endif
 
@@ -3678,9 +3839,8 @@
 		ret = i40e_pf_txq_wait(pf, pf_q, enable);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Tx ring %d %sable timeout\n",
-				 __func__, vsi->seid, pf_q,
-				 (enable ? "en" : "dis"));
+				 "VSI seid %d Tx ring %d %sable timeout\n",
+				 vsi->seid, pf_q, (enable ? "en" : "dis"));
 			break;
 		}
 	}
@@ -3756,9 +3916,8 @@
 		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Rx ring %d %sable timeout\n",
-				 __func__, vsi->seid, pf_q,
-				 (enable ? "en" : "dis"));
+				 "VSI seid %d Rx ring %d %sable timeout\n",
+				 vsi->seid, pf_q, (enable ? "en" : "dis"));
 			break;
 		}
 	}
@@ -4053,17 +4212,15 @@
 	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
 	    vsi->type == I40E_VSI_FCOE) {
 		dev_dbg(&vsi->back->pdev->dev,
-			"%s: VSI seid %d skipping FCoE VSI disable\n",
-			 __func__, vsi->seid);
+			 "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
 		return;
 	}
 
 	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
-	if (vsi->netdev && netif_running(vsi->netdev)) {
+	if (vsi->netdev && netif_running(vsi->netdev))
 		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-	} else {
+	else
 		i40e_vsi_close(vsi);
-	}
 }
 
 /**
@@ -4128,8 +4285,8 @@
 		ret = i40e_pf_txq_wait(pf, pf_q, false);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "%s: VSI seid %d Tx ring %d disable timeout\n",
-				 __func__, vsi->seid, pf_q);
+				 "VSI seid %d Tx ring %d disable timeout\n",
+				 vsi->seid, pf_q);
 			return ret;
 		}
 	}
@@ -4862,11 +5019,14 @@
  * i40e_print_link_message - print link up or down
  * @vsi: the VSI for which link needs a message
  */
-static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 {
-	char speed[SPEED_SIZE] = "Unknown";
-	char fc[FC_SIZE] = "RX/TX";
+	const char *speed = "Unknown";
+	const char *fc = "Unknown";
 
+	if (vsi->current_isup == isup)
+		return;
+	vsi->current_isup = isup;
 	if (!isup) {
 		netdev_info(vsi->netdev, "NIC Link is Down\n");
 		return;
@@ -4883,19 +5043,19 @@
 
 	switch (vsi->back->hw.phy.link_info.link_speed) {
 	case I40E_LINK_SPEED_40GB:
-		strlcpy(speed, "40 Gbps", SPEED_SIZE);
+		speed = "40 G";
 		break;
 	case I40E_LINK_SPEED_20GB:
-		strncpy(speed, "20 Gbps", SPEED_SIZE);
+		speed = "20 G";
 		break;
 	case I40E_LINK_SPEED_10GB:
-		strlcpy(speed, "10 Gbps", SPEED_SIZE);
+		speed = "10 G";
 		break;
 	case I40E_LINK_SPEED_1GB:
-		strlcpy(speed, "1000 Mbps", SPEED_SIZE);
+		speed = "1000 M";
 		break;
 	case I40E_LINK_SPEED_100MB:
-		strncpy(speed, "100 Mbps", SPEED_SIZE);
+		speed = "100 M";
 		break;
 	default:
 		break;
@@ -4903,20 +5063,20 @@
 
 	switch (vsi->back->hw.fc.current_mode) {
 	case I40E_FC_FULL:
-		strlcpy(fc, "RX/TX", FC_SIZE);
+		fc = "RX/TX";
 		break;
 	case I40E_FC_TX_PAUSE:
-		strlcpy(fc, "TX", FC_SIZE);
+		fc = "TX";
 		break;
 	case I40E_FC_RX_PAUSE:
-		strlcpy(fc, "RX", FC_SIZE);
+		fc = "RX";
 		break;
 	default:
-		strlcpy(fc, "None", FC_SIZE);
+		fc = "None";
 		break;
 	}
 
-	netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+	netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
 		    speed, fc);
 }
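
Two things change in i40e_print_link_message(): it caches the last reported
state so repeated link events stop spamming the log, and it selects pointers to
string literals instead of strlcpy()/strncpy() into fixed stack buffers. A
userspace sketch; the validity flag is an addition not present above, there to
keep the very first "down" message from being suppressed:

    #include <stdio.h>
    #include <stdbool.h>

    void print_link_message(bool isup, unsigned int speed_mbps)
    {
        static bool last_isup, last_valid;
        const char *speed = "Unknown";

        if (last_valid && last_isup == isup)
            return; /* state unchanged: suppress the duplicate message */
        last_isup = isup;
        last_valid = true;

        if (!isup) {
            printf("NIC Link is Down\n");
            return;
        }

        switch (speed_mbps) {
        case 40000: speed = "40 G";   break;
        case 10000: speed = "10 G";   break;
        case 1000:  speed = "1000 M"; break;
        default:                      break;
        }
        printf("NIC Link is Up %sbps Full Duplex\n", speed);
    }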
 
@@ -5335,15 +5495,13 @@
 			 "VSI reinit requested\n");
 		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			if (vsi != NULL &&
 			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
 				i40e_vsi_reinit_locked(pf->vsi[v]);
 				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
 			}
 		}
-
-		/* no further action needed, so return now */
-		return;
 	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
 		int v;
 
@@ -5351,6 +5509,7 @@
 		dev_info(&pf->pdev->dev, "VSI down requested\n");
 		for (v = 0; v < pf->num_alloc_vsi; v++) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			if (vsi != NULL &&
 			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
 				set_bit(__I40E_DOWN, &vsi->state);
@@ -5358,13 +5517,9 @@
 				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
 			}
 		}
-
-		/* no further action needed, so return now */
-		return;
 	} else {
 		dev_info(&pf->pdev->dev,
 			 "bad reset request 0x%08x\n", reset_flags);
-		return;
 	}
 }
 
@@ -5420,8 +5575,7 @@
 		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
 	}
 
-	dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
-		need_reconfig);
+	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
 	return need_reconfig;
 }
 
@@ -5448,16 +5602,14 @@
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
 		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
-	dev_dbg(&pf->pdev->dev,
-		"%s: LLDP event mib bridge type 0x%x\n", __func__, type);
+	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
 	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
 		return ret;
 
 	/* Check MIB Type and return if event for Remote MIB update */
 	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
 	dev_dbg(&pf->pdev->dev,
-		"%s: LLDP event mib type %s\n", __func__,
-		type ? "remote" : "local");
+		"LLDP event mib type %s\n", type ? "remote" : "local");
 	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
 		/* Update the remote cached instance and return */
 		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@ -5642,7 +5794,9 @@
  **/
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
 {
+	struct i40e_fdir_filter *filter;
 	u32 fcnt_prog, fcnt_avail;
+	struct hlist_node *node;
 
 	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
 		return;
@@ -5671,6 +5825,18 @@
 				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
 		}
 	}
+
+	/* if hw had a problem adding a filter, delete it */
+	if (pf->fd_inv > 0) {
+		hlist_for_each_entry_safe(filter, node,
+					  &pf->fdir_filter_list, fdir_node) {
+			if (filter->fd_id == pf->fd_inv) {
+				hlist_del(&filter->fdir_node);
+				kfree(filter);
+				pf->fdir_pf_active_filters--;
+			}
+		}
+	}
 }
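
hlist_for_each_entry_safe() lets the body unlink and kfree() the current node
because the iterator caches the next pointer up front. The same idea for a
plain singly linked list, using a pointer-to-link so no "prev" bookkeeping is
needed (hypothetical types):

    #include <stdlib.h>

    struct fdir_filter {
        struct fdir_filter *next;
        int fd_id;
    };

    /* Unlink and free every node whose fd_id matches fd_inv. */
    void delete_matching(struct fdir_filter **head, int fd_inv)
    {
        struct fdir_filter **pp = head;

        while (*pp) {
            struct fdir_filter *f = *pp;

            if (f->fd_id == fd_inv) {
                *pp = f->next; /* unlink before freeing */
                free(f);
            } else {
                pp = &f->next;
            }
        }
    }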
 
 #define I40E_MIN_FD_FLUSH_INTERVAL 10
@@ -5690,49 +5856,51 @@
 	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
 		return;
 
-	if (time_after(jiffies, pf->fd_flush_timestamp +
-				(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
-		/* If the flush is happening too quick and we have mostly
-		 * SB rules we should not re-enable ATR for some time.
-		 */
-		min_flush_time = pf->fd_flush_timestamp
-				+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
-		fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
+	if (!time_after(jiffies, pf->fd_flush_timestamp +
+				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
+		return;
 
-		if (!(time_after(jiffies, min_flush_time)) &&
-		    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
-				dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
-			disable_atr = true;
-		}
+	/* If the flush is happening too quick and we have mostly SB rules we
+	 * should not re-enable ATR for some time.
+	 */
+	min_flush_time = pf->fd_flush_timestamp +
+			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
+	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
 
-		pf->fd_flush_timestamp = jiffies;
-		pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-		/* flush all filters */
-		wr32(&pf->hw, I40E_PFQF_CTL_1,
-		     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
-		i40e_flush(&pf->hw);
-		pf->fd_flush_cnt++;
-		pf->fd_add_err = 0;
-		do {
-			/* Check FD flush status every 5-6msec */
-			usleep_range(5000, 6000);
-			reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
-			if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
-				break;
-		} while (flush_wait_retry--);
-		if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
-			dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
-		} else {
-			/* replay sideband filters */
-			i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-			if (!disable_atr)
-				pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-			clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-			if (I40E_DEBUG_FD & pf->hw.debug_mask)
-				dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
-		}
+	if (!(time_after(jiffies, min_flush_time)) &&
+	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+		disable_atr = true;
 	}
+
+	pf->fd_flush_timestamp = jiffies;
+	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+	/* flush all filters */
+	wr32(&pf->hw, I40E_PFQF_CTL_1,
+	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
+	i40e_flush(&pf->hw);
+	pf->fd_flush_cnt++;
+	pf->fd_add_err = 0;
+	do {
+		/* Check FD flush status every 5-6msec */
+		usleep_range(5000, 6000);
+		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
+		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
+			break;
+	} while (flush_wait_retry--);
+	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
+		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
+	} else {
+		/* replay sideband filters */
+		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+		if (!disable_atr)
+			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
+		if (I40E_DEBUG_FD & pf->hw.debug_mask)
+			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+	}
+
 }
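
The flush rework above is a pure control-flow transformation: the timing check
is inverted into an early return and the forty-odd lines that used to nest
inside it move one tab to the left. The shape in miniature:

    /* Before: everything nests under the condition. */
    void flush_old(int interval_expired)
    {
        if (interval_expired) {
            /* ... the whole flush sequence, one level deeper ... */
        }
    }

    /* After: invert the test, bail out early, body un-indents. */
    void flush_new(int interval_expired)
    {
        if (!interval_expired)
            return;

        /* ... the same flush sequence at the top level ... */
    }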
 
 /**
@@ -5840,15 +6008,23 @@
  **/
 static void i40e_link_event(struct i40e_pf *pf)
 {
-	bool new_link, old_link;
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	u8 new_link_speed, old_link_speed;
+	i40e_status status;
+	bool new_link, old_link;
 
 	/* set this to force the get_link_status call to refresh state */
 	pf->hw.phy.get_link_info = true;
 
 	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
-	new_link = i40e_get_link_status(&pf->hw);
+
+	status = i40e_get_link_status(&pf->hw, &new_link);
+	if (status) {
+		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
+			status);
+		return;
+	}
+
 	old_link_speed = pf->hw.phy.link_info_old.link_speed;
 	new_link_speed = pf->hw.phy.link_info.link_speed;
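
i40e_get_link_status() used to return bool, so an admin-queue failure was
indistinguishable from "link down". It now returns an i40e_status and reports
the link state through a pointer, and i40e_link_event() keeps the previous
state on error instead of guessing. A sketch of the calling pattern (the stub
always succeeds):

    #include <stdio.h>
    #include <stdbool.h>

    /* Stub: status and data travel separately. */
    int get_link_status(bool *link_up)
    {
        *link_up = true; /* a real version would query the firmware */
        return 0;        /* nonzero would be a transport error */
    }

    void link_event(void)
    {
        bool new_link;
        int status = get_link_status(&new_link);

        if (status) {
            fprintf(stderr, "couldn't get link state, status: %d\n", status);
            return; /* keep the old state rather than reporting link down */
        }
        printf("link is %s\n", new_link ? "up" : "down");
    }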
 
@@ -5895,7 +6071,8 @@
 		return;
 	pf->service_timer_previous = jiffies;
 
-	i40e_link_event(pf);
+	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
+		i40e_link_event(pf);
 
 	/* Update the stats for active netdevs so the network stack
 	 * can look at updated numbers whenever it cares to
@@ -5904,10 +6081,12 @@
 		if (pf->vsi[i] && pf->vsi[i]->netdev)
 			i40e_update_stats(pf->vsi[i]);
 
-	/* Update the stats for the active switching components */
-	for (i = 0; i < I40E_MAX_VEB; i++)
-		if (pf->veb[i])
-			i40e_update_veb_stats(pf->veb[i]);
+	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
+		/* Update the stats for the active switching components */
+		for (i = 0; i < I40E_MAX_VEB; i++)
+			if (pf->veb[i])
+				i40e_update_veb_stats(pf->veb[i]);
+	}
 
 	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
 }
@@ -6218,8 +6397,9 @@
 {
 	struct i40e_pf *pf = veb->pf;
 
-	dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
-		 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
+			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
 	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
 		i40e_disable_pf_switch_lb(pf);
 	else
@@ -6286,6 +6466,7 @@
 
 		if (pf->vsi[v]->veb_idx == veb->idx) {
 			struct i40e_vsi *vsi = pf->vsi[v];
+
 			vsi->uplink_seid = veb->seid;
 			ret = i40e_add_vsi(vsi);
 			if (ret) {
@@ -6350,12 +6531,6 @@
 		}
 	} while (err);
 
-	if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
-	    (pf->hw.aq.fw_maj_ver < 2)) {
-		pf->hw.func_caps.num_msix_vectors++;
-		pf->hw.func_caps.num_msix_vectors_vf++;
-	}
-
 	if (pf->hw.debug_mask & I40E_DEBUG_USER)
 		dev_info(&pf->pdev->dev,
 			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -6568,9 +6743,7 @@
 	}
 #endif /* CONFIG_I40E_DCB */
 #ifdef I40E_FCOE
-	ret = i40e_init_pf_fcoe(pf);
-	if (ret)
-		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
+	i40e_init_pf_fcoe(pf);
 
 #endif
 	/* do basic switch setup */
@@ -6592,9 +6765,9 @@
 	/* make sure our flow control settings are restored */
 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
-			 i40e_stat_str(&pf->hw, ret),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+			i40e_stat_str(&pf->hw, ret),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* Rebuild the VSIs and VEBs that existed before reset.
 	 * They are still in our local switch element arrays, so only
@@ -6664,6 +6837,15 @@
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 		ret = i40e_setup_misc_vector(pf);
 
+	/* Add a filter to drop all Flow control frames from any VSI so that
+	 * they are not transmitted. This stops a malicious VF from sending out
+	 * PAUSE or PFC frames and potentially controlling traffic for other
+	 * PF/VF VSIs.
+	 * The FW can still send Flow control frames if enabled.
+	 */
+	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+						       pf->main_vsi_seid);
+
 	/* restart the VSIs that were rebuilt and running before the reset */
 	i40e_pf_unquiesce_all_vsi(pf);
 
@@ -7046,6 +7228,7 @@
 	vsi->idx = vsi_idx;
 	vsi->rx_itr_setting = pf->rx_itr_default;
 	vsi->tx_itr_setting = pf->tx_itr_default;
+	vsi->int_rate_limit = 0;
 	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
 				pf->rss_table_size : 64;
 	vsi->netdev_registered = false;
@@ -7064,6 +7247,8 @@
 	/* Setup default MSIX irq handler for VSI */
 	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
+	/* Initialize VSI lock */
+	spin_lock_init(&vsi->mac_filter_list_lock);
 	pf->vsi[vsi_idx] = vsi;
 	ret = vsi_idx;
 	goto unlock_pf;
@@ -7621,7 +7806,7 @@
 			 "Cannot set RSS key, err %s aq_err %s\n",
 			 i40e_stat_str(&pf->hw, ret),
 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
-		return ret;
+		goto config_rss_aq_out;
 	}
 
 	if (vsi->type == I40E_VSI_MAIN)
@@ -7635,6 +7820,8 @@
 			 i40e_stat_str(&pf->hw, ret),
 			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
+config_rss_aq_out:
+	kfree(rss_lut);
 	return ret;
 }
 
@@ -7909,6 +8096,7 @@
 	/* Set default capability flags */
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
 		    I40E_FLAG_MSI_ENABLED     |
+		    I40E_FLAG_LINK_POLLING_ENABLED |
 		    I40E_FLAG_MSIX_ENABLED;
 
 	if (iommu_present(&pci_bus_type))
@@ -7951,12 +8139,12 @@
 	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
 		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-		if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
-			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-		} else {
+		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
+		    pf->hw.num_partitions > 1)
 			dev_info(&pf->pdev->dev,
 				 "Flow Director Sideband mode Disabled in MFP mode\n");
-		}
+		else
+			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
 		pf->fdir_pf_filter_count =
 				 pf->hw.func_caps.fd_filters_guaranteed;
 		pf->hw.fdir_shared_filter_count =
@@ -7966,12 +8154,11 @@
 	if (pf->hw.func_caps.vmdq) {
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
 		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
 	}
 
 #ifdef I40E_FCOE
-	err = i40e_init_pf_fcoe(pf);
-	if (err)
-		dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+	i40e_init_pf_fcoe(pf);
 
 #endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
@@ -7995,6 +8182,9 @@
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
 
+	/* By default FW has this off for performance reasons */
+	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+
 	/* set up queue assignment tracking */
 	size = sizeof(struct i40e_lump_tracking)
 		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@ -8174,9 +8364,6 @@
 		pf->vxlan_ports[idx] = 0;
 		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
-		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
-			 ntohs(port));
 	} else {
 		netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
 			    ntohs(port));
@@ -8328,13 +8515,15 @@
  * @seq: RTNL message seq #
  * @dev: the netdev being configured
  * @filter_mask: unused
+ * @nlflags: netlink flags passed in
  *
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev,
-				   u32 filter_mask, int nlflags)
+				   u32 __always_unused filter_mask,
+				   int nlflags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -8363,7 +8552,7 @@
 /**
  * i40e_features_check - Validate encapsulated packet conforms to limits
  * @skb: skb buff
- * @netdev: This physical port's netdev
+ * @dev: This physical port's netdev
  * @features: Offload features that the stack believes apply
  **/
 static netdev_features_t i40e_features_check(struct sk_buff *skb,
@@ -8444,6 +8633,7 @@
 
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
 				  NETIF_F_GSO_UDP_TUNNEL |
+				  NETIF_F_GSO_GRE	 |
 				  NETIF_F_TSO;
 
 	netdev->features = NETIF_F_SG		       |
@@ -8451,6 +8641,7 @@
 			   NETIF_F_SCTP_CSUM	       |
 			   NETIF_F_HIGHDMA	       |
 			   NETIF_F_GSO_UDP_TUNNEL      |
+			   NETIF_F_GSO_GRE	       |
 			   NETIF_F_HW_VLAN_CTAG_TX     |
 			   NETIF_F_HW_VLAN_CTAG_RX     |
 			   NETIF_F_HW_VLAN_CTAG_FILTER |
@@ -8476,17 +8667,26 @@
 		 * default a MAC-VLAN filter that accepts any tagged packet
 		 * which must be replaced by a normal filter.
 		 */
-		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
+		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
+			spin_lock_bh(&vsi->mac_filter_list_lock);
 			i40e_add_filter(vsi, mac_addr,
 					I40E_VLAN_ANY, false, true);
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
+		}
 	} else {
 		/* relate the VSI_VMDQ name to the VSI_MAIN name */
 		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
 			 pf->vsi[pf->lan_vsi]->netdev->name);
 		random_ether_addr(mac_addr);
+
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
+
+	spin_lock_bh(&vsi->mac_filter_list_lock);
 	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	ether_addr_copy(netdev->dev_addr, mac_addr);
 	ether_addr_copy(netdev->perm_addr, mac_addr);
@@ -8542,12 +8742,18 @@
 		return 1;
 
 	veb = pf->veb[vsi->veb_idx];
-	/* Uplink is a bridge in VEPA mode */
-	if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
-		return 0;
+	if (!veb) {
+		dev_info(&pf->pdev->dev,
+			 "There is no veb associated with the bridge\n");
+		return -ENOENT;
+	}
 
-	/* Uplink is a bridge in VEB mode */
-	return 1;
+	/* Uplink is a bridge in VEPA mode */
+	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
+		return 0;
+
+	/* Uplink is a bridge in VEB mode */
+	return 1;
 }
 
 /**
@@ -8560,10 +8770,13 @@
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
 	int ret = -ENODEV;
-	struct i40e_mac_filter *f, *ftmp;
+	u8 laa_macaddr[ETH_ALEN];
+	bool found_laa_mac_filter = false;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vsi_context ctxt;
+	struct i40e_mac_filter *f, *ftmp;
+
 	u8 enabled_tc = 0x1; /* TC0 enabled */
 	int f_count = 0;
 
@@ -8735,32 +8948,41 @@
 		vsi->id = ctxt.vsi_number;
 	}
 
+	spin_lock_bh(&vsi->mac_filter_list_lock);
 	/* If macvlan filters already exist, force them to get loaded */
 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
 		f->changed = true;
 		f_count++;
 
+		/* Expected to have only one MAC filter entry for LAA in list */
 		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
-			struct i40e_aqc_remove_macvlan_element_data element;
-
-			memset(&element, 0, sizeof(element));
-			ether_addr_copy(element.mac_addr, f->macaddr);
-			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
-			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
-						     &element, 1, NULL);
-			if (ret) {
-				/* some older FW has a different default */
-				element.flags |=
-					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-				i40e_aq_remove_macvlan(hw, vsi->seid,
-						       &element, 1, NULL);
-			}
-
-			i40e_aq_mac_address_write(hw,
-						  I40E_AQC_WRITE_TYPE_LAA_WOL,
-						  f->macaddr, NULL);
+			ether_addr_copy(laa_macaddr, f->macaddr);
+			found_laa_mac_filter = true;
 		}
 	}
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+	if (found_laa_mac_filter) {
+		struct i40e_aqc_remove_macvlan_element_data element;
+
+		memset(&element, 0, sizeof(element));
+		ether_addr_copy(element.mac_addr, laa_macaddr);
+		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+		ret = i40e_aq_remove_macvlan(hw, vsi->seid,
+					     &element, 1, NULL);
+		if (ret) {
+			/* some older FW has a different default */
+			element.flags |=
+				       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+			i40e_aq_remove_macvlan(hw, vsi->seid,
+					       &element, 1, NULL);
+		}
+
+		i40e_aq_mac_address_write(hw,
+					  I40E_AQC_WRITE_TYPE_LAA_WOL,
+					  laa_macaddr, NULL);
+	}
+
 	if (f_count) {
 		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 		pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -8823,9 +9045,12 @@
 		i40e_vsi_disable_irq(vsi);
 	}
 
+	spin_lock_bh(&vsi->mac_filter_list_lock);
 	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
 		i40e_del_filter(vsi, f->macaddr, f->vlan,
 				f->is_vf, f->is_netdev);
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
 	i40e_sync_vsi_filters(vsi, false);
 
 	i40e_vsi_delete(vsi);
@@ -9051,8 +9276,7 @@
 		if (veb) {
 			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
 				dev_info(&vsi->back->pdev->dev,
-					 "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
-					 __func__);
+					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
 				return NULL;
 			}
 			/* We come up by default in VEPA mode if SRIOV is not
@@ -9702,6 +9926,7 @@
 	} else {
 		/* force a reset of TC and queue layout configurations */
 		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+
 		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
 		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
 		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
@@ -9725,7 +9950,7 @@
 		i40e_config_rss(pf);
 
 	/* fill in link information and enable LSE reporting */
-	i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+	i40e_update_link_info(&pf->hw);
 	i40e_link_event(pf);
 
 	/* Initialize user-specific link properties */
@@ -9843,8 +10068,14 @@
 	}
 
 	pf->queues_left = queues_left;
+	dev_dbg(&pf->pdev->dev,
+		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
+		pf->hw.func_caps.num_tx_qp,
+		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
+		pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
+		pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
 #ifdef I40E_FCOE
-	dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
 }
 
@@ -9912,12 +10143,19 @@
 	}
 	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
 		buf += sprintf(buf, "DCB ");
+#if IS_ENABLED(CONFIG_VXLAN)
+	buf += sprintf(buf, "VxLAN ");
+#endif
 	if (pf->flags & I40E_FLAG_PTP)
 		buf += sprintf(buf, "PTP ");
 #ifdef I40E_FCOE
 	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
 		buf += sprintf(buf, "FCOE ");
 #endif
+	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+		buf += sprintf(buf, "VEB ");
+	else
+		buf += sprintf(buf, "VEPA ");
 
 	BUG_ON(buf > (string + INFO_STRING_LEN));
 	dev_info(&pf->pdev->dev, "%s\n", string);
@@ -9938,14 +10176,15 @@
 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	struct i40e_aq_get_phy_abilities_resp abilities;
-	unsigned long ioremap_len;
 	struct i40e_pf *pf;
 	struct i40e_hw *hw;
 	static u16 pfs_found;
+	u16 wol_nvm_bits;
 	u16 link_status;
-	int err = 0;
+	int err;
 	u32 len;
 	u32 i;
+	u8 set_fc_aq_fail;
 
 	err = pci_enable_device_mem(pdev);
 	if (err)
@@ -9991,15 +10230,15 @@
 	hw = &pf->hw;
 	hw->back = pf;
 
-	ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
-			    I40E_MAX_CSR_SPACE);
+	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
+				I40E_MAX_CSR_SPACE);
 
-	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
+	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
 	if (!hw->hw_addr) {
 		err = -EIO;
 		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
 			 (unsigned int)pci_resource_start(pdev, 0),
-			 (unsigned int)pci_resource_len(pdev, 0), err);
+			 pf->ioremap_len, err);
 		goto err_ioremap;
 	}
 	hw->vendor_id = pdev->vendor;
@@ -10057,7 +10296,13 @@
 	pf->hw.fc.requested_mode = I40E_FC_NONE;
 
 	err = i40e_init_adminq(hw);
-	dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+
+	/* provide nvm, fw, api versions */
+	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
+		 i40e_nvm_version_str(hw));
+
 	if (err) {
 		dev_info(&pdev->dev,
 			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
@@ -10158,8 +10403,12 @@
 	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
 
-	/* WoL defaults to disabled */
-	pf->wol_en = false;
+	/* NVM bit on means WoL disabled for the port */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
+	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+		pf->wol_en = false;
+	else
+		pf->wol_en = true;
 	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
 
 	/* set up the main switch operations */
@@ -10200,6 +10449,25 @@
 		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
 		goto err_vsis;
 	}
+
+	/* Make sure flow control is set according to current settings */
+	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+		dev_dbg(&pf->pdev->dev,
+			"Set fc with err %s aq_err %s on get_phy_cap\n",
+			i40e_stat_str(hw, err),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+		dev_dbg(&pf->pdev->dev,
+			"Set fc with err %s aq_err %s on set_phy_config\n",
+			i40e_stat_str(hw, err),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+		dev_dbg(&pf->pdev->dev,
+			"Set fc with err %s aq_err %s on get_link_info\n",
+			i40e_stat_str(hw, err),
+			i40e_aq_str(hw, hw->aq.asq_last_status));
+
 	/* if FDIR VSI was set up, start it now */
 	for (i = 0; i < pf->num_alloc_vsi; i++) {
 		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
@@ -10290,37 +10558,82 @@
 	i40e_fcoe_vsi_setup(pf);
 
 #endif
-	/* Get the negotiated link width and speed from PCI config space */
-	pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
+#define PCI_SPEED_SIZE 8
+#define PCI_WIDTH_SIZE 8
+	/* Devices on the IOSF bus do not have this information
+	 * and will report PCI Gen 1 x 1 by default so don't bother
+	 * checking them.
+	 */
+	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
+		char speed[PCI_SPEED_SIZE] = "Unknown";
+		char width[PCI_WIDTH_SIZE] = "Unknown";
 
-	i40e_set_pci_config_data(hw, link_status);
+		/* Get the negotiated link width and speed from PCI config
+		 * space
+		 */
+		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
+					  &link_status);
 
-	dev_info(&pdev->dev, "PCI-Express: %s %s\n",
-		(hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
-		 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
-		 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
-		 "Unknown"),
-		(hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
-		 hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
-		 hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
-		 hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
-		 "Unknown"));
+		i40e_set_pci_config_data(hw, link_status);
 
-	if (hw->bus.width < i40e_bus_width_pcie_x8 ||
-	    hw->bus.speed < i40e_bus_speed_8000) {
-		dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
-		dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+		switch (hw->bus.speed) {
+		case i40e_bus_speed_8000:
+			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+		case i40e_bus_speed_5000:
+			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+		case i40e_bus_speed_2500:
+			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+		default:
+			break;
+		}
+		switch (hw->bus.width) {
+		case i40e_bus_width_pcie_x8:
+			strncpy(width, "8", PCI_WIDTH_SIZE); break;
+		case i40e_bus_width_pcie_x4:
+			strncpy(width, "4", PCI_WIDTH_SIZE); break;
+		case i40e_bus_width_pcie_x2:
+			strncpy(width, "2", PCI_WIDTH_SIZE); break;
+		case i40e_bus_width_pcie_x1:
+			strncpy(width, "1", PCI_WIDTH_SIZE); break;
+		default:
+			break;
+		}
+
+		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
+			 speed, width);
+
+		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
+		    hw->bus.speed < i40e_bus_speed_8000) {
+			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+		}
 	}
 
 	/* get the requested speeds from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev,
-			 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
-			 i40e_stat_str(&pf->hw, err),
-			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
+			i40e_stat_str(&pf->hw, err),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
+	/* get the supported phy types from the fw */
+	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
+	if (err)
+		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
+			i40e_stat_str(&pf->hw, err),
+			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
+
+	/* Add a filter to drop all Flow control frames from any VSI so that
+	 * they are not transmitted. This stops a malicious VF from sending out
+	 * PAUSE or PFC frames and potentially controlling traffic for other
+	 * PF/VF VSIs.
+	 * The FW can still send Flow control frames if enabled.
+	 */
+	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
+						       pf->main_vsi_seid);
+
 	/* print a string summarizing features */
 	i40e_print_features(pf);
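
The PCI report above maps two enums to short strings with switch statements and
bounded strncpy() into fixed buffers; note the check is skipped entirely for
IOSF-attached parts, which always report Gen1 x1. Reduced to one dimension,
with a hypothetical enum:

    #include <stdio.h>
    #include <string.h>

    #define PCI_SPEED_SIZE 8

    enum bus_speed { BUS_2500, BUS_5000, BUS_8000, BUS_UNKNOWN };

    void report_speed(enum bus_speed s)
    {
        char speed[PCI_SPEED_SIZE] = "Unknown";

        switch (s) {
        case BUS_8000: strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
        case BUS_5000: strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
        case BUS_2500: strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
        default: break;
        }
        speed[PCI_SPEED_SIZE - 1] = '\0'; /* strncpy need not terminate */
        printf("PCI-Express: Speed %sGT/s\n", speed);
    }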
 
@@ -10368,6 +10681,7 @@
 static void i40e_remove(struct pci_dev *pdev)
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
+	struct i40e_hw *hw = &pf->hw;
 	i40e_status ret_code;
 	int i;
 
@@ -10375,6 +10689,10 @@
 
 	i40e_ptp_stop(pf);
 
+	/* Disable RSS in hw */
+	wr32(hw, I40E_PFQF_HENA(0), 0);
+	wr32(hw, I40E_PFQF_HENA(1), 0);
+
 	/* no more scheduling of any task */
 	set_bit(__I40E_DOWN, &pf->state);
 	del_timer_sync(&pf->service_timer);
@@ -10491,7 +10809,7 @@
 	int err;
 	u32 reg;
 
-	dev_info(&pdev->dev, "%s\n", __func__);
+	dev_dbg(&pdev->dev, "%s\n", __func__);
 	if (pci_enable_device_mem(pdev)) {
 		dev_info(&pdev->dev,
 			 "Cannot re-enable PCI device after reset.\n");
@@ -10531,13 +10849,13 @@
 {
 	struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-	dev_info(&pdev->dev, "%s\n", __func__);
+	dev_dbg(&pdev->dev, "%s\n", __func__);
 	if (test_bit(__I40E_SUSPENDED, &pf->state))
 		return;
 
 	rtnl_lock();
 	i40e_handle_reset_warning(pf);
-	rtnl_lock();
+	rtnl_unlock();
 }
 
 /**
@@ -10623,9 +10941,7 @@
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
-		dev_err(&pdev->dev,
-			"%s: Cannot enable PCI device from suspend\n",
-			__func__);
+		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
 		return err;
 	}
 	pci_set_master(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index d0288ad..6100cdd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -290,9 +290,18 @@
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			       u16 *data)
 {
-	if (hw->mac.type == I40E_MAC_X722)
-		return i40e_read_nvm_word_aq(hw, offset, data);
-	return i40e_read_nvm_word_srctl(hw, offset, data);
+	enum i40e_status_code ret_code = 0;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (!ret_code) {
+			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+			i40e_release_nvm(hw);
+		}
+	} else {
+		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+	}
+	return ret_code;
 }
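
The rework keys the read path off a capability flag rather than a MAC type, and
the AQ path now brackets the read with acquire/release of the shared NVM
resource. The acquire/operate/release skeleton with trivial stubs (all names
hypothetical):

    static unsigned short fake_nvm[16] = { 0xBEEF };

    int acquire_nvm(void)  { return 0; } /* 0 on success */
    void release_nvm(void) { }

    int read_word_aq(unsigned int off, unsigned short *data)
    {
        *data = fake_nvm[off & 15];
        return 0;
    }

    int read_word_srctl(unsigned int off, unsigned short *data)
    {
        *data = fake_nvm[off & 15];
        return 0;
    }

    int read_nvm_word(int use_aq, unsigned int off, unsigned short *data)
    {
        int ret;

        if (!use_aq)
            return read_word_srctl(off, data);

        ret = acquire_nvm();
        if (!ret) {
            ret = read_word_aq(off, data);
            release_nvm(); /* always paired with a successful acquire */
        }
        return ret;
    }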
 
 /**
@@ -397,9 +406,19 @@
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 				 u16 *words, u16 *data)
 {
-	if (hw->mac.type == I40E_MAC_X722)
-		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
-	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+	enum i40e_status_code ret_code = 0;
+
+	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+		if (!ret_code) {
+			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+							   data);
+			i40e_release_nvm(hw);
+		}
+	} else {
+		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+	}
+	return ret_code;
 }
 
 /**
@@ -465,7 +484,7 @@
 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
 						    u16 *checksum)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code;
 	struct i40e_virt_mem vmem;
 	u16 pcie_alt_module = 0;
 	u16 checksum_local = 0;
@@ -545,13 +564,16 @@
  **/
 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
 {
-	i40e_status ret_code = 0;
+	i40e_status ret_code;
 	u16 checksum;
+	__le16 le_sum;
 
 	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
-	if (!ret_code)
+	if (!ret_code) {
+		le_sum = cpu_to_le16(checksum);
 		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
-					     1, &checksum, true);
+					     1, &le_sum, true);
+	}
 
 	return ret_code;
 }
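
The checksum fix converts the host-order u16 into an explicit __le16 before
handing it to i40e_write_nvm_aq(), so the word lands in flash little-endian
even on big-endian hosts. The conversion, written portably:

    #include <stdint.h>
    #include <stdio.h>

    /* Serialize a host-order 16-bit value as little-endian bytes; this is
     * what cpu_to_le16() guarantees regardless of host byte order.
     */
    void put_le16(uint8_t *dst, uint16_t v)
    {
        dst[0] = (uint8_t)(v & 0xff); /* low byte first */
        dst[1] = (uint8_t)(v >> 8);
    }

    int main(void)
    {
        uint8_t buf[2];

        put_le16(buf, 0xBAAD);
        printf("%02x %02x\n", buf[0], buf[1]); /* "ad ba" on any host */
        return 0;
    }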
@@ -630,7 +652,7 @@
 	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
 }
 
-static char *i40e_nvm_update_state_str[] = {
+static const char * const i40e_nvm_update_state_str[] = {
 	"I40E_NVMUPD_INVALID",
 	"I40E_NVMUPD_READ_CON",
 	"I40E_NVMUPD_READ_SNT",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index dcb72a8..bb9d583 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,8 +58,8 @@
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
 				bool pf_lut, u8 *lut, u16 lut_size);
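
Constifying the return type of i40e_aq_str()/i40e_stat_str() matches the
i40e_nvm_update_state_str change earlier in this series: with
"const char * const", neither the pointer array nor the strings can be written,
so both can live in read-only data and callers cannot scribble on shared
strings. In miniature:

    #include <stdio.h>

    static const char * const state_str[] = {
        "I40E_NVMUPD_INVALID",
        "I40E_NVMUPD_READ_CON",
        "I40E_NVMUPD_READ_SNT",
    };

    const char *nvmupd_state_str(unsigned int idx)
    {
        if (idx >= sizeof(state_str) / sizeof(state_str[0]))
            return "UNKNOWN";
        return state_str[idx];
    }

    int main(void)
    {
        printf("%s\n", nvmupd_state_str(1));
        return 0;
    }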
@@ -258,7 +258,8 @@
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
 void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
-bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+i40e_status i40e_update_link_info(struct i40e_hw *hw);
 i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
 				      u32 *max_bw, u32 *min_bw, bool *min_valid,
@@ -321,4 +322,6 @@
 			       void *buff, u16 *ret_buff_size,
 			       u8 *ret_next_table, u32 *ret_next_index,
 			       struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+						    u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 8c40d6e..565ca7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -618,9 +618,8 @@
 
 	/* Attempt to register the clock before enabling the hardware. */
 	pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
-	if (IS_ERR(pf->ptp_clock)) {
+	if (IS_ERR(pf->ptp_clock))
 		return PTR_ERR(pf->ptp_clock);
-	}
 
 	/* clear the hwtstamp settings here during clock create, instead of
 	 * during regular init, so that we can maintain settings across a
@@ -675,8 +674,8 @@
 		struct timespec64 ts;
 		u32 regval;
 
-		dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
-			 netdev->name);
+		if (pf->hw.debug_mask & I40E_DEBUG_LAN)
+			dev_info(&pf->pdev->dev, "PHC enabled\n");
 		pf->flags |= I40E_FLAG_PTP;
 
 		/* Ensure the clocks are running. */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3ce4900..635b3ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -465,10 +465,11 @@
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
 	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
-				 rx_desc->wb.qword0.hi_dword.fd_id);
+				 pf->fd_inv);
 
 		/* Check if the programming error is for ATR.
 		 * If so, auto disable ATR and set a state for
@@ -814,6 +815,8 @@
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
+ * Returns true if ITR changed, false if not
+ *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  * is faster updates and more accurate ITR for the current traffic
@@ -822,21 +825,32 @@
  * testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
 	enum i40e_latency_range new_latency_range = rc->latency_range;
+	struct i40e_q_vector *qv = rc->ring->q_vector;
 	u32 new_itr = rc->itr;
 	int bytes_per_int;
+	int usecs;
 
 	if (rc->total_packets == 0 || !rc->itr)
-		return;
+		return false;
 
 	/* simple throttlerate management
-	 *   0-10MB/s   lowest (100000 ints/s)
+	 *   0-10MB/s   lowest (50000 ints/s)
 	 *  10-20MB/s   low    (20000 ints/s)
-	 *  20-1249MB/s bulk   (8000 ints/s)
+	 *  20-1249MB/s bulk   (18000 ints/s)
+	 *  > 40000 Rx packets per second (8000 ints/s)
+	 *
+	 * The math works out because the divisor is in 10^(-6), which
+	 * turns the bytes/us input value into MB/s values.  Be sure to
+	 * work in usecs, as the ITR register values are written in
+	 * 2 usec increments, and to use the smoothed values that the
+	 * countdown timer gives us.
 	 */
-	bytes_per_int = rc->total_bytes / rc->itr;
+	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+	bytes_per_int = rc->total_bytes / usecs;
+
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
@@ -849,35 +863,52 @@
 			new_latency_range = I40E_LOWEST_LATENCY;
 		break;
 	case I40E_BULK_LATENCY:
-		if (bytes_per_int <= 20)
-			new_latency_range = I40E_LOW_LATENCY;
-		break;
+	case I40E_ULTRA_LATENCY:
 	default:
 		if (bytes_per_int <= 20)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+
+	/* this is to adjust RX more aggressively when streaming small
+	 * packets.  The value of 40000 was picked as it is just beyond
+	 * what the hardware can receive per second if in low latency
+	 * mode.
+	 */
+#define RX_ULTRA_PACKET_RATE 40000
+
+	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+	    (&qv->rx == rc))
+		new_latency_range = I40E_ULTRA_LATENCY;
+
 	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
-		new_itr = I40E_ITR_100K;
+		new_itr = I40E_ITR_50K;
 		break;
 	case I40E_LOW_LATENCY:
 		new_itr = I40E_ITR_20K;
 		break;
 	case I40E_BULK_LATENCY:
+		new_itr = I40E_ITR_18K;
+		break;
+	case I40E_ULTRA_LATENCY:
 		new_itr = I40E_ITR_8K;
 		break;
 	default:
 		break;
 	}
 
-	if (new_itr != rc->itr)
-		rc->itr = new_itr;
-
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
+
+	if (new_itr != rc->itr) {
+		rc->itr = new_itr;
+		return true;
+	}
+
+	return false;
 }
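
The rewritten bytes_per_int calculation can be sanity-checked in userspace;
a minimal sketch, assuming ITR_COUNTDOWN_START is 100 (that define is not
part of this hunk):

	#include <stdio.h>

	#define ITR_COUNTDOWN_START 100	/* assumed smoothing window */
	#define I40E_ITR_20K	0x0019	/* register value, 2 usec units */

	int main(void)
	{
		unsigned int itr = I40E_ITR_20K;
		unsigned long total_bytes = 3000000UL; /* since last tune */

		/* itr << 1 converts 2-usec register units to microseconds;
		 * the countdown factor smooths over many interrupts */
		unsigned int usecs = (itr << 1) * ITR_COUNTDOWN_START;
		unsigned long bytes_per_int = total_bytes / usecs;

		/* bytes/usec is numerically MB/s, so the 10 and 20
		 * thresholds above really are 10MB/s and 20MB/s */
		printf("usecs=%u rate=%lu MB/s\n", usecs, bytes_per_int);
		return 0;
	}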
 
 /**
@@ -923,6 +954,8 @@
 	if (!dev)
 		return -ENOMEM;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(tx_ring->tx_bi);
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!tx_ring->tx_bi)
@@ -1083,6 +1116,8 @@
 	struct device *dev = rx_ring->dev;
 	int bi_size;
 
+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(rx_ring->rx_bi);
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!rx_ring->rx_bi)
@@ -1263,16 +1298,11 @@
 			     struct sk_buff *skb, u16 vlan_tag)
 {
 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	u64 flags = vsi->back->flags;
 
 	if (vlan_tag & VLAN_VID_MASK)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
-	if (flags & I40E_FLAG_IN_NETPOLL)
-		netif_rx(skb);
-	else
-		napi_gro_receive(&q_vector->napi, skb);
+	napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -1439,7 +1469,7 @@
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	const int current_node = numa_node_id();
+	const int current_node = numa_mem_id();
 	struct i40e_vsi *vsi = rx_ring->vsi;
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
@@ -1517,6 +1547,7 @@
 		cleaned_count++;
 		if (rx_hbo || rx_sph) {
 			int len;
+
 			if (rx_hbo)
 				len = I40E_RX_HDR_SIZE;
 			else
@@ -1702,9 +1733,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1749,6 +1777,21 @@
 	return total_rx_packets;
 }
 
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+	u32 val;
+
+	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+	      (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+	return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_PFINT_DYN_CTLN
+
 /**
  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
  * @vsi: the VSI we care about
@@ -1759,56 +1802,69 @@
 					  struct i40e_q_vector *q_vector)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
-	u16 old_itr;
+	bool rx = false, tx = false;
+	u32 rxval, txval;
 	int vector;
-	u32 val;
 
 	vector = (q_vector->v_idx + vsi->base_vector);
+
+	/* avoid the dynamic calculation while in countdown mode, or if
+	 * dynamic ITR is disabled for both Rx and Tx
+	 */
+	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+	if (q_vector->itr_countdown > 0 ||
+	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+		goto enable_int;
+	}
+
 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
-		old_itr = q_vector->rx.itr;
-		i40e_set_new_dynamic_itr(&q_vector->rx);
-		if (old_itr != q_vector->rx.itr) {
-			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-			(I40E_RX_ITR <<
-				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-			(q_vector->rx.itr <<
-				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
-		} else {
-			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-			(I40E_ITR_NONE <<
-				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-		}
-		if (!test_bit(__I40E_DOWN, &vsi->state))
-			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-	} else {
-		i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
+		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
 	}
+
 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
-		old_itr = q_vector->tx.itr;
-		i40e_set_new_dynamic_itr(&q_vector->tx);
-		if (old_itr != q_vector->tx.itr) {
-			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-				(I40E_TX_ITR <<
-				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
-				(q_vector->tx.itr <<
-				   I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
-		} else {
-			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
-				(I40E_ITR_NONE <<
-				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
-		}
-		if (!test_bit(__I40E_DOWN, &vsi->state))
-			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-			      vsi->base_vector - 1), val);
-	} else {
-		i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
+		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
 	}
+
+	if (rx || tx) {
+		/* get the higher of the two ITR adjustments and
+		 * use the same value for both ITR registers
+		 * when in adaptive mode (Rx and/or Tx)
+		 */
+		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+		q_vector->tx.itr = q_vector->rx.itr = itr;
+		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+		tx = true;
+		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+		rx = true;
+	}
+
+	/* only need to enable the interrupt once, but need
+	 * to possibly update both ITR values
+	 */
+	if (rx) {
+		/* set the INTENA_MSK_MASK so that this first write
+		 * won't actually enable the interrupt, but will instead
+		 * just update the ITR (bit 31 in both the PF and VF
+		 * registers)
+		 */
+		rxval |= BIT(31);
+		/* don't check _DOWN because interrupt isn't being enabled */
+		wr32(hw, INTREG(vector - 1), rxval);
+	}
+
+enable_int:
+	if (!test_bit(__I40E_DOWN, &vsi->state))
+		wr32(hw, INTREG(vector - 1), txval);
+
+	if (q_vector->itr_countdown)
+		q_vector->itr_countdown--;
+	else
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
 }
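
The two-write dance above (a masked Rx write followed by an enabling Tx
write) is easier to see in isolation. A sketch with illustrative bit
positions; the real shift and mask values come from the register headers,
not from this diff:

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n)		(1U << (n))

	/* illustrative layout only; see the I40E_PFINT_DYN_CTLN_* defines */
	#define INTENA_MASK	BIT(0)
	#define CLEARPBA_MASK	BIT(1)
	#define ITR_INDX_SHIFT	3
	#define INTERVAL_SHIFT	5
	#define INTENA_MSK_MASK	BIT(31)	/* keeps INTENA from taking effect */

	#define RX_ITR	0
	#define TX_ITR	1

	static uint32_t buildreg_itr(int type, uint16_t itr)
	{
		return INTENA_MASK | CLEARPBA_MASK |
		       ((uint32_t)type << ITR_INDX_SHIFT) |
		       ((uint32_t)itr << INTERVAL_SHIFT);
	}

	int main(void)
	{
		/* first write updates the Rx ITR but stays masked... */
		uint32_t rxval = buildreg_itr(RX_ITR, 0x19) | INTENA_MSK_MASK;
		/* ...second write updates the Tx ITR and re-enables */
		uint32_t txval = buildreg_itr(TX_ITR, 0x19);

		printf("rx=0x%08X tx=0x%08X\n", rxval, txval);
		return 0;
	}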
 
 /**
@@ -1829,7 +1885,7 @@
 	bool clean_complete = true;
 	bool arm_wb = false;
 	int budget_per_ring;
-	int cleaned;
+	int work_done = 0;
 
 	if (test_bit(__I40E_DOWN, &vsi->state)) {
 		napi_complete(napi);
@@ -1842,24 +1898,34 @@
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
 		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
 	}
 
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		goto tx_only;
+
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
 	 * allow the budget to go below 1 because that would exit polling early.
 	 */
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
 	i40e_for_each_ring(ring, q_vector->rx) {
+		int cleaned;
+
 		if (ring_is_ps_enabled(ring))
 			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
 		else
 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+		work_done += cleaned;
 		/* if we didn't clean as many as budgeted, we must be done */
 		clean_complete &= (budget_per_ring != cleaned);
 	}
 
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
+tx_only:
 		if (arm_wb)
 			i40e_force_wb(vsi, q_vector);
 		return budget;
@@ -1869,7 +1935,7 @@
 		q_vector->arm_wb_state = false;
 
 	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
 		i40e_update_enable_itr(vsi, q_vector);
 	} else { /* Legacy mode */
@@ -2077,6 +2143,7 @@
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
 	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
+
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
 			return -EINVAL;
@@ -2120,6 +2187,7 @@
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @hdr_len:  ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to u64 object
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
@@ -2179,6 +2247,7 @@
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
  * @tx_flags: the collected send information
+ * @cd_type_cmd_tso_mss: ptr to u64 object
  *
  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
  **/
@@ -2221,6 +2290,7 @@
  * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
  * @cd_tunneling: ptr to context desc bits
  **/
 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
@@ -2736,6 +2806,7 @@
 	u8 hdr_len = 0;
 	int tsyn;
 	int tso;
+
 	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
@@ -2768,10 +2839,11 @@
 	if (tsyn)
 		tx_flags |= I40E_TX_FLAGS_TSYN;
 
-	if (i40e_chk_linearize(skb, tx_flags))
+	if (i40e_chk_linearize(skb, tx_flags)) {
 		if (skb_linearize(skb))
 			goto out_drop;
-
+		tx_ring->tx_stats.tx_linearize++;
+	}
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index a3978c2..6779fb7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -32,11 +32,14 @@
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
 #define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
 #define I40E_ITR_8K                0x003E
 #define I40E_ITR_4K                0x007A
-#define I40E_ITR_RX_DEF            I40E_ITR_8K
-#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
 #define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
 #define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
 #define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
 #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
 #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
 #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -79,12 +91,12 @@
 	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-	BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
 #define i40e_pf_get_default_rss_hena(pf) \
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
@@ -165,6 +177,7 @@
 	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
@@ -188,6 +201,7 @@
 	u64 restart_queue;
 	u64 tx_busy;
 	u64 tx_done_old;
+	u64 tx_linearize;
 };
 
 struct i40e_rx_queue_stats {
@@ -284,6 +298,7 @@
 	I40E_LOWEST_LATENCY = 0,
 	I40E_LOW_LATENCY = 1,
 	I40E_BULK_LATENCY = 2,
+	I40E_ULTRA_LATENCY = 3,
 };
 
 struct i40e_ring_container {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index af48290..dd2da35 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -33,29 +33,7 @@
 #include "i40e_adminq.h"
 #include "i40e_hmc.h"
 #include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710		0x1572
-#define I40E_DEV_ID_QEMU		0x1574
-#define I40E_DEV_ID_KX_A		0x157F
-#define I40E_DEV_ID_KX_B		0x1580
-#define I40E_DEV_ID_KX_C		0x1581
-#define I40E_DEV_ID_QSFP_A		0x1583
-#define I40E_DEV_ID_QSFP_B		0x1584
-#define I40E_DEV_ID_QSFP_C		0x1585
-#define I40E_DEV_ID_10G_BASE_T		0x1586
-#define I40E_DEV_ID_20G_KR2		0x1587
-#define I40E_DEV_ID_VF			0x154C
-#define I40E_DEV_ID_VF_HV		0x1571
-#define I40E_DEV_ID_SFP_X722		0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
-#define I40E_DEV_ID_X722_VF		0x37CD
-#define I40E_DEV_ID_X722_VF_HV		0x37D9
-
-#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
-					 (d) == I40E_DEV_ID_QSFP_B  || \
-					 (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
 #define I40E_MASK(mask, shift) (mask << shift)
@@ -158,14 +136,14 @@
 };
 
 enum i40e_vsi_type {
-	I40E_VSI_MAIN = 0,
-	I40E_VSI_VMDQ1,
-	I40E_VSI_VMDQ2,
-	I40E_VSI_CTRL,
-	I40E_VSI_FCOE,
-	I40E_VSI_MIRROR,
-	I40E_VSI_SRIOV,
-	I40E_VSI_FDIR,
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
 	I40E_VSI_TYPE_UNKNOWN
 };
 
@@ -189,16 +167,65 @@
 	bool crc_enable;
 	u8 pacing;
 	u8 requested_speeds;
+	u8 module_type[3];
+	/* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP		0x03
+#define I40E_MODULE_TYPE_QSFP		0x0D
+	/* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE	0x01
+#define I40E_MODULE_TYPE_40G_LR4	0x02
+#define I40E_MODULE_TYPE_40G_SR4	0x04
+#define I40E_MODULE_TYPE_40G_CR4	0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR	0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR	0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM	0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER	0x80
+	/* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX	0x01
+#define I40E_MODULE_TYPE_1000BASE_LX	0x02
+#define I40E_MODULE_TYPE_1000BASE_CX	0x04
+#define I40E_MODULE_TYPE_1000BASE_T	0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+	I40E_CAP_PHY_TYPE_SGMII		  = BIT(I40E_PHY_TYPE_SGMII),
+	I40E_CAP_PHY_TYPE_1000BASE_KX	  = BIT(I40E_PHY_TYPE_1000BASE_KX),
+	I40E_CAP_PHY_TYPE_10GBASE_KX4	  = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+	I40E_CAP_PHY_TYPE_10GBASE_KR	  = BIT(I40E_PHY_TYPE_10GBASE_KR),
+	I40E_CAP_PHY_TYPE_40GBASE_KR4	  = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+	I40E_CAP_PHY_TYPE_XAUI		  = BIT(I40E_PHY_TYPE_XAUI),
+	I40E_CAP_PHY_TYPE_XFI		  = BIT(I40E_PHY_TYPE_XFI),
+	I40E_CAP_PHY_TYPE_SFI		  = BIT(I40E_PHY_TYPE_SFI),
+	I40E_CAP_PHY_TYPE_XLAUI		  = BIT(I40E_PHY_TYPE_XLAUI),
+	I40E_CAP_PHY_TYPE_XLPPI		  = BIT(I40E_PHY_TYPE_XLPPI),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4_CU  = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1_CU  = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_AOC	  = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+	I40E_CAP_PHY_TYPE_40GBASE_AOC	  = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+	I40E_CAP_PHY_TYPE_100BASE_TX	  = BIT(I40E_PHY_TYPE_100BASE_TX),
+	I40E_CAP_PHY_TYPE_1000BASE_T	  = BIT(I40E_PHY_TYPE_1000BASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_T	  = BIT(I40E_PHY_TYPE_10GBASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_SR	  = BIT(I40E_PHY_TYPE_10GBASE_SR),
+	I40E_CAP_PHY_TYPE_10GBASE_LR	  = BIT(I40E_PHY_TYPE_10GBASE_LR),
+	I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1	  = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4	  = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+	I40E_CAP_PHY_TYPE_40GBASE_SR4	  = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+	I40E_CAP_PHY_TYPE_40GBASE_LR4	  = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+	I40E_CAP_PHY_TYPE_1000BASE_SX	  = BIT(I40E_PHY_TYPE_1000BASE_SX),
+	I40E_CAP_PHY_TYPE_1000BASE_LX	  = BIT(I40E_PHY_TYPE_1000BASE_LX),
+	I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+					 BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+	I40E_CAP_PHY_TYPE_20GBASE_KR2	  = BIT(I40E_PHY_TYPE_20GBASE_KR2)
 };
 
 struct i40e_phy_info {
 	struct i40e_link_status link_info;
 	struct i40e_link_status link_info_old;
-	u32 autoneg_advertised;
-	u32 phy_id;
-	u32 module_type;
 	bool get_link_info;
 	enum i40e_media_type media_type;
+	/* all the phy types the NVM is capable of */
+	enum i40e_aq_capabilities_phy_type phy_types;
 };
 
 #define I40E_HW_CAP_MAX_GPIO			30
@@ -287,6 +314,7 @@
 	bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
 	u16 version;              /* NVM package version */
 	u32 eetrack;              /* NVM data version */
+	u32 oem_ver;              /* OEM version info */
 };
 
 /* definitions used in NVM update support */
@@ -415,6 +443,8 @@
 #define I40E_APP_PROTOID_FIP		0x8914
 #define I40E_APP_SEL_ETHTYPE		0x1
 #define I40E_APP_SEL_TCPIP		0x2
+#define I40E_CEE_APP_SEL_ETHTYPE	0x0
+#define I40E_CEE_APP_SEL_TCPIP		0x1
 
 /* CEE or IEEE 802.1Qaz ETS Configuration data */
 struct i40e_dcb_ets_config {
@@ -445,6 +475,8 @@
 	u8  dcbx_mode;
 #define I40E_DCBX_MODE_CEE	0x1
 #define I40E_DCBX_MODE_IEEE	0x2
+	u8  app_mode;
+#define I40E_DCBX_APPS_NON_WILLING	0x1
 	u32 numapps;
 	u32 tlv_status; /* CEE mode TLV status */
 	struct i40e_dcb_ets_config etscfg;
@@ -508,8 +540,12 @@
 	u16 dcbx_status;
 
 	/* DCBX info */
-	struct i40e_dcbx_config local_dcbx_config;
-	struct i40e_dcbx_config remote_dcbx_config;
+	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
+	u64 flags;
 
 	/* debug mask */
 	u32 debug_mask;
@@ -1032,8 +1068,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
-				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1201,6 +1237,8 @@
 #define I40E_SR_EMP_MODULE_PTR			0x0F
 #define I40E_SR_PBA_FLAGS			0x15
 #define I40E_SR_PBA_BLOCK_PTR			0x16
+#define I40E_SR_BOOT_CONFIG_PTR			0x17
+#define I40E_NVM_OEM_VER_OFF			0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION		0x18
 #define I40E_SR_NVM_WAKE_ON_LAN			0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 0f8d415..ae87982 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -81,7 +81,6 @@
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
 	I40E_VIRTCHNL_OP_EVENT = 17,
-	I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -151,6 +150,7 @@
 #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR	0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index eacce93..44462b4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -536,6 +536,7 @@
 	}
 	if (type == I40E_VSI_SRIOV) {
 		u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
 		vf->lan_vsi_idx = vsi->idx;
 		vf->lan_vsi_id = vsi->id;
 		/* If the port VLAN has been configured and then the
@@ -546,6 +547,8 @@
 		 */
 		if (vf->port_vlan_id)
 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
+
+		spin_lock_bh(&vsi->mac_filter_list_lock);
 		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
 				    vf->port_vlan_id ? vf->port_vlan_id : -1,
 				    true, false);
@@ -558,6 +561,7 @@
 		if (!f)
 			dev_info(&pf->pdev->dev,
 				 "Could not allocate VF broadcast filter\n");
+		spin_unlock_bh(&vsi->mac_filter_list_lock);
 	}
 
 	/* program mac filter */
@@ -605,6 +609,7 @@
 	/* map PF queues to VF queues */
 	for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
 		u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
+
 		reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
 		wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
 		total_queue_pairs++;
@@ -701,6 +706,7 @@
 	 */
 	vf->num_queue_pairs = 0;
 	vf->vf_states = 0;
+	clear_bit(I40E_VF_STAT_INIT, &vf->vf_states);
 }
 
 /**
@@ -839,11 +845,11 @@
 complete_reset:
 	/* reallocate VF resources to reset the VSI state */
 	i40e_free_vf_res(vf);
-	i40e_alloc_vf_res(vf);
-	i40e_enable_vf_mappings(vf);
-	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
-	clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
-
+	if (!i40e_alloc_vf_res(vf)) {
+		i40e_enable_vf_mappings(vf);
+		set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+		clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+	}
 	/* tell the VF the reset is done */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
 	i40e_flush(hw);
@@ -872,6 +878,11 @@
 			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
 					       false);
 
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+			i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
+					       false);
+
 	/* Disable IOV before freeing resources. This lets any VF drivers
 	 * running in the host get themselves cleaned up before we yank
 	 * the carpet out from underneath their feet.
@@ -933,6 +944,7 @@
 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 		if (ret) {
+			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
 			pf->num_alloc_vfs = 0;
 			goto err_iov;
 		}
@@ -957,8 +969,6 @@
 		/* VF resources get allocated during reset */
 		i40e_reset_vf(&vfs[i], false);
 
-		/* enable VF vplan_qtable mappings */
-		i40e_enable_vf_mappings(&vfs[i]);
 	}
 	pf->num_alloc_vfs = num_alloc_vfs;
 
@@ -986,24 +996,26 @@
 	int pre_existing_vfs = pci_num_vf(pdev);
 	int err = 0;
 
-	if (pf->state & __I40E_TESTING) {
+	if (test_bit(__I40E_TESTING, &pf->state)) {
 		dev_warn(&pdev->dev,
 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
 		err = -EPERM;
 		goto err_out;
 	}
 
-	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
 		i40e_free_vfs(pf);
 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
 		goto out;
 
 	if (num_vfs > pf->num_req_vfs) {
+		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
+			 num_vfs, pf->num_req_vfs);
 		err = -EPERM;
 		goto err_out;
 	}
 
+	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
 	err = i40e_alloc_vfs(pf, num_vfs);
 	if (err) {
 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
@@ -1094,6 +1106,8 @@
 		}
 	} else {
 		vf->num_valid_msgs++;
+		/* reset the invalid counter if a valid message is received */
+		vf->num_invalid_msgs = 0;
 	}
 
 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id,	v_opcode, v_retval,
@@ -1195,16 +1209,22 @@
 	} else {
 		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 	}
+
+	if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
 	if (vf->lan_vsi_idx) {
 		vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
 		vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
-		vfres->vsi_res[i].num_queue_pairs =
-		    pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
-		memcpy(vfres->vsi_res[i].default_mac_addr,
-		       vf->default_lan_addr.addr, ETH_ALEN);
+		vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+		/* VFs only use TC 0 */
+		vfres->vsi_res[i].qset_handle
+					  = le16_to_cpu(vsi->info.qs_handle[0]);
+		ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+				vf->default_lan_addr.addr);
 		i++;
 	}
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1582,6 +1602,11 @@
 	}
 	vsi = pf->vsi[vf->lan_vsi_idx];
 
+	/* Lock once, because every function inside the for loop accesses
+	 * the VSI's MAC filter list, which must be protected by this lock.
+	 */
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+
 	/* add new addresses to the list */
 	for (i = 0; i < al->num_elements; i++) {
 		struct i40e_mac_filter *f;
@@ -1600,9 +1625,11 @@
 			dev_err(&pf->pdev->dev,
 				"Unable to add VF MAC filter\n");
 			ret = I40E_ERR_PARAM;
+			spin_unlock_bh(&vsi->mac_filter_list_lock);
 			goto error_param;
 		}
 	}
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
 	if (i40e_sync_vsi_filters(vsi, false))
@@ -1650,10 +1677,12 @@
 	}
 	vsi = pf->vsi[vf->lan_vsi_idx];
 
+	spin_lock_bh(&vsi->mac_filter_list_lock);
 	/* delete addresses from the list */
 	for (i = 0; i < al->num_elements; i++)
 		i40e_del_filter(vsi, al->list[i].addr,
 				I40E_VLAN_ANY, true, false);
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
 
 	/* program the updated filter list */
 	if (i40e_sync_vsi_filters(vsi, false))
@@ -1708,6 +1737,7 @@
 	for (i = 0; i < vfl->num_elements; i++) {
 		/* add new VLAN filter */
 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+
 		if (ret)
 			dev_err(&pf->pdev->dev,
 				"Unable to add VF vlan filter %d, error %d\n",
@@ -1759,6 +1789,7 @@
 
 	for (i = 0; i < vfl->num_elements; i++) {
 		int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+
 		if (ret)
 			dev_err(&pf->pdev->dev,
 				"Unable to delete VF vlan filter %d, error %d\n",
@@ -1870,7 +1901,6 @@
 	case I40E_VIRTCHNL_OP_UNKNOWN:
 	default:
 		return -EPERM;
-		break;
 	}
 	/* few more checks */
 	if ((valid_len != msglen) || (err_msg_format)) {
@@ -2049,6 +2079,11 @@
 		goto error_param;
 	}
 
+	/* Lock once, because the add/del_filter functions invoked below
+	 * require mac_filter_list_lock to be held
+	 */
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+
 	/* delete the temporary mac address */
 	i40e_del_filter(vsi, vf->default_lan_addr.addr,
 			vf->port_vlan_id ? vf->port_vlan_id : -1,
@@ -2060,6 +2095,8 @@
 	list_for_each_entry(f, &vsi->mac_filter_list, list)
 		i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
 
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
 	dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
 	/* program mac filter */
 	if (i40e_sync_vsi_filters(vsi, false)) {
@@ -2089,8 +2126,10 @@
 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 			      int vf_id, u16 vlan_id, u8 qos)
 {
+	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_pf *pf = np->vsi->back;
+	bool is_vsi_in_vlan = false;
 	struct i40e_vsi *vsi;
 	struct i40e_vf *vf;
 	int ret = 0;
@@ -2116,12 +2155,15 @@
 		goto error_pvid;
 	}
 
-	if (le16_to_cpu(vsi->info.pvid) ==
-	    (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
 		/* duplicate request, so just return success */
 		goto error_pvid;
 
-	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
+	spin_lock_bh(&vsi->mac_filter_list_lock);
+	is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
+	spin_unlock_bh(&vsi->mac_filter_list_lock);
+
+	if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
 		dev_err(&pf->pdev->dev,
 			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
 			vf_id);
@@ -2141,7 +2183,7 @@
 	 * MAC addresses deleted.
 	 */
 	if ((!(vlan_id || qos) ||
-	    (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+	    vlanprio != le16_to_cpu(vsi->info.pvid)) &&
 	    vsi->info.pvid)
 		ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
 
@@ -2156,8 +2198,7 @@
 		}
 	}
 	if (vlan_id || qos)
-		ret = i40e_vsi_add_pvid(vsi,
-				vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+		ret = i40e_vsi_add_pvid(vsi, vlanprio);
 	else
 		i40e_vsi_remove_pvid(vsi);
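
The new vlanprio local precomputes the tag value that this function
previously assembled in three separate places. A sketch, assuming the
standard 802.1Q layout where the priority occupies the top three TCI bits
(the driver's actual I40E_VLAN_PRIORITY_SHIFT is defined elsewhere):

	#include <stdio.h>
	#include <stdint.h>

	#define VLAN_PRIORITY_SHIFT 13	/* assumed 802.1Q PCP position */

	int main(void)
	{
		uint16_t vlan_id = 100;	/* VID, 0..4095 */
		uint8_t qos = 5;	/* priority, 0..7 */
		uint16_t vlanprio =
			vlan_id | ((uint16_t)qos << VLAN_PRIORITY_SHIFT);

		/* 100 | (5 << 13) == 0xA064; the duplicate-request check
		 * and i40e_vsi_add_pvid() now share this one value */
		printf("vlanprio=0x%04X\n", vlanprio);
		return 0;
	}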
 
@@ -2310,7 +2351,7 @@
 
 	ivi->vf = vf_id;
 
-	memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
+	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
 
 	ivi->max_tx_rate = vf->tx_rate;
 	ivi->min_tx_rate = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 736f6f0..da44995 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -29,8 +29,6 @@
 
 #include "i40e.h"
 
-#define I40E_MAX_MACVLAN_FILTERS 256
-#define I40E_MAX_VLAN_FILTERS 256
 #define I40E_MAX_VLANID 4095
 
 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
@@ -98,7 +96,8 @@
 
 	u8 num_queue_pairs;	/* num of qps assigned to VF vsis */
 	u64 num_mdd_events;	/* num of mdd events detected */
-	u64 num_invalid_msgs;	/* num of malformed or invalid msgs detected */
+	/* num of consecutive malformed or invalid msgs detected */
+	u64 num_invalid_msgs;
 	u64 num_valid_msgs;	/* num of valid msgs detected */
 
 	unsigned long vf_caps;	/* vf's adv. capabilities */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 0940db7..fd123ca 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -373,7 +373,6 @@
 
 	hw->aq.asq.next_to_use = 0;
 	hw->aq.asq.next_to_clean = 0;
-	hw->aq.asq.count = hw->aq.num_asq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_asq_ring(hw);
@@ -391,6 +390,7 @@
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.asq.count = hw->aq.num_asq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -432,7 +432,6 @@
 
 	hw->aq.arq.next_to_use = 0;
 	hw->aq.arq.next_to_clean = 0;
-	hw->aq.arq.count = hw->aq.num_arq_entries;
 
 	/* allocate the ring memory */
 	ret_code = i40e_alloc_adminq_arq_ring(hw);
@@ -450,6 +449,7 @@
 		goto init_adminq_free_rings;
 
 	/* success! */
+	hw->aq.arq.count = hw->aq.num_arq_entries;
 	goto init_adminq_exit;
 
 init_adminq_free_rings:
@@ -469,8 +469,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.asq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.asq_mutex);
+
+	if (hw->aq.asq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_asq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.asq.head, 0);
@@ -479,16 +483,13 @@
 	wr32(hw, hw->aq.asq.bal, 0);
 	wr32(hw, hw->aq.asq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.asq_mutex);
-
 	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
 
+shutdown_asq_out:
 	mutex_unlock(&hw->aq.asq_mutex);
-
 	return ret_code;
 }
 
@@ -502,8 +503,12 @@
 {
 	i40e_status ret_code = 0;
 
-	if (hw->aq.arq.count == 0)
-		return I40E_ERR_NOT_READY;
+	mutex_lock(&hw->aq.arq_mutex);
+
+	if (hw->aq.arq.count == 0) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto shutdown_arq_out;
+	}
 
 	/* Stop firmware AdminQ processing */
 	wr32(hw, hw->aq.arq.head, 0);
@@ -512,16 +517,13 @@
 	wr32(hw, hw->aq.arq.bal, 0);
 	wr32(hw, hw->aq.arq.bah, 0);
 
-	/* make sure lock is available */
-	mutex_lock(&hw->aq.arq_mutex);
-
 	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
 
+shutdown_arq_out:
 	mutex_unlock(&hw->aq.arq_mutex);
-
 	return ret_code;
 }
 
@@ -620,8 +622,7 @@
 	details = I40E_ADMINQ_DETAILS(*asq, ntc);
 	while (rd32(hw, hw->aq.asq.head) != ntc) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "%s: ntc %d head %d.\n", __func__, ntc,
-			   rd32(hw, hw->aq.asq.head));
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
 
 		if (details->callback) {
 			I40E_ADMINQ_CALLBACK cb_func =
@@ -685,19 +686,23 @@
 	u16  retval = 0;
 	u32  val = 0;
 
-	val = rd32(hw, hw->aq.asq.head);
-	if (val >= hw->aq.num_asq_entries) {
-		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
-	}
+	mutex_lock(&hw->aq.asq_mutex);
 
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
 		status = I40E_ERR_QUEUE_EMPTY;
-		goto asq_send_command_exit;
+		goto asq_send_command_error;
+	}
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_error;
 	}
 
 	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
@@ -722,8 +727,6 @@
 	desc->flags &= ~cpu_to_le16(details->flags_dis);
 	desc->flags |= cpu_to_le16(details->flags_ena);
 
-	mutex_lock(&hw->aq.asq_mutex);
-
 	if (buff_size > hw->aq.asq_buf_size) {
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
@@ -848,7 +851,6 @@
 
 asq_send_command_error:
 	mutex_unlock(&hw->aq.asq_mutex);
-asq_send_command_exit:
 	return status;
 }
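
Moving the mutex_lock() ahead of the count check closes a check-then-act
race: a concurrent shutdown could zero count between the old unlocked test
and the lock. A generic pthreads sketch of the fixed pattern:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int q_count = 16; /* 0 means "not initialized" */

	static int send_command(void)
	{
		int status = 0;

		pthread_mutex_lock(&q_lock);	/* lock first... */
		if (q_count == 0) {		/* ...check under the lock */
			status = -1;	/* I40E_ERR_QUEUE_EMPTY analogue */
			goto out;
		}
		/* queue the descriptor here; count cannot drop to zero
		 * underneath us while the mutex is held */
	out:
		pthread_mutex_unlock(&q_lock);
		return status;
	}

	int main(void)
	{
		printf("status=%d\n", send_command());
		return 0;
	}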
 
@@ -894,6 +896,13 @@
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
+	if (hw->aq.arq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Admin queue not initialized.\n");
+		ret_code = I40E_ERR_QUEUE_EMPTY;
+		goto clean_arq_element_err;
+	}
+
 	/* set next_to_use to head */
 	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
 	if (ntu == ntc) {
@@ -955,6 +964,8 @@
 	/* Set pending if needed, unlock and return */
 	if (pending != NULL)
 		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
 	mutex_unlock(&hw->aq.arq_mutex);
 
 	return ret_code;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 547b79b..a3eae5d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -109,9 +109,10 @@
 
 /**
  * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_rc: AdminQ error code to convert
+ * aq_ret: AdminQ handler error code, which can override aq_rc
+ * aq_rc: AdminQ firmware error code to convert
  **/
-static inline int i40e_aq_rc_to_posix(u32 aq_ret, u16 aq_rc)
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 {
 	int aq_to_posix[] = {
 		0,           /* I40E_AQ_RC_OK */
@@ -143,8 +144,9 @@
 	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
 		return -EAGAIN;
 
-	if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
+
 	return aq_to_posix[aq_rc];
 }
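
The rewritten range check is the old ARRAY_SIZE() comparison in disguise,
but it also rejects negative values now that aq_rc arrives as a plain int:
the u32 cast maps any negative input to a huge unsigned number that fails
the < test. A small demonstration (the table size here is illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define TABLE_SIZE 22	/* illustrative stand-in for the array size */

	static int in_range(int aq_rc)
	{
		/* -1 becomes 0xFFFFFFFF after the cast, never < TABLE_SIZE,
		 * so negative codes fall through to the -ERANGE path */
		return (uint32_t)aq_rc < TABLE_SIZE;
	}

	int main(void)
	{
		printf("aq_rc=5  -> %d\n", in_range(5));  /* 1: valid */
		printf("aq_rc=25 -> %d\n", in_range(25)); /* 0: too big */
		printf("aq_rc=-1 -> %d\n", in_range(-1)); /* 0: rejected */
		return 0;
	}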
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index c802209..fcb9ef3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1719,11 +1719,13 @@
 	u8	phy_type;    /* i40e_aq_phy_type   */
 	u8	link_speed;  /* i40e_aq_link_speed */
 	u8	link_info;
-#define I40E_AQ_LINK_UP			0x01
+#define I40E_AQ_LINK_UP			0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION	0x01
 #define I40E_AQ_LINK_FAULT		0x02
 #define I40E_AQ_LINK_FAULT_TX		0x04
 #define I40E_AQ_LINK_FAULT_RX		0x08
 #define I40E_AQ_LINK_FAULT_REMOTE	0x10
+#define I40E_AQ_LINK_UP_PORT		0x20
 #define I40E_AQ_MEDIA_AVAILABLE		0x40
 #define I40E_AQ_SIGNAL_DETECT		0x80
 	u8	an_info;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index d45d0ae..72b1942 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -51,7 +51,9 @@
 		case I40E_DEV_ID_QSFP_B:
 		case I40E_DEV_ID_QSFP_C:
 		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
 		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
 		case I40E_DEV_ID_SFP_X722:
@@ -85,7 +87,7 @@
  * @hw: pointer to the HW structure
  * @aq_err: the AQ error code to convert
  **/
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 {
 	switch (aq_err) {
 	case I40E_AQ_RC_OK:
@@ -145,7 +147,7 @@
  * @hw: pointer to the HW structure
  * @stat_err: the status error code to convert
  **/
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
 {
 	switch (stat_err) {
 	case 0:
@@ -329,25 +331,11 @@
 			len = buf_len;
 		/* write the full 16-byte chunks */
 		for (i = 0; i < (len - 16); i += 16)
-			i40e_debug(hw, mask,
-				   "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
-				   i, buf[i], buf[i + 1], buf[i + 2],
-				   buf[i + 3], buf[i + 4], buf[i + 5],
-				   buf[i + 6], buf[i + 7], buf[i + 8],
-				   buf[i + 9], buf[i + 10], buf[i + 11],
-				   buf[i + 12], buf[i + 13], buf[i + 14],
-				   buf[i + 15]);
+			i40e_debug(hw, mask, "\t0x%04X  %16ph\n", i, buf + i);
 		/* write whatever's left over without overrunning the buffer */
-		if (i < len) {
-			char d_buf[80];
-			int j = 0;
-
-			memset(d_buf, 0, sizeof(d_buf));
-			j += sprintf(d_buf, "\t0x%04X ", i);
-			while (i < len)
-				j += sprintf(&d_buf[j], " %02X", buf[i++]);
-			i40e_debug(hw, mask, "%s\n", d_buf);
-		}
+		if (i < len)
+			i40e_debug(hw, mask, "\t0x%04X  %*ph\n",
+					     i, len - i, buf + i);
 	}
 }
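
The collapse of the two hand-rolled hex loops relies on printk's %*ph
extension, which prints a run of bytes as space-separated hex. A userspace
stand-in for what %16ph produces:

	#include <stdio.h>

	/* Approximation of printk's "%*ph": len space-separated hex bytes. */
	static void print_ph(const unsigned char *buf, int len)
	{
		int i;

		for (i = 0; i < len; i++)
			printf("%02x%s", buf[i], i + 1 < len ? " " : "\n");
	}

	int main(void)
	{
		unsigned char buf[16] = { 0xde, 0xad, 0xbe, 0xef };

		printf("\t0x0000  ");
		print_ph(buf, 16); /* mirrors i40e_debug(hw, mask, "...%16ph\n") */
		return 0;
	}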
 
@@ -441,9 +429,6 @@
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
 					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
 
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
-
 	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
 
 	return status;
@@ -518,8 +503,6 @@
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
 					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
 	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
-	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
-	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
 
 	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
 
@@ -990,10 +973,10 @@
 			     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
 	for (i = 0; i < msg->num_vsis; i++) {
 		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
-			memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
-			memcpy(hw->mac.addr, vsi_res->default_mac_addr,
-			       ETH_ALEN);
+			ether_addr_copy(hw->mac.perm_addr,
+					vsi_res->default_mac_addr);
+			ether_addr_copy(hw->mac.addr,
+					vsi_res->default_mac_addr);
 		}
 		vsi_res++;
 	}
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
new file mode 100644
index 0000000..e6a39c9
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -0,0 +1,55 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
+#define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
+
+#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 55ae4b0..cbd9a1b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,8 +60,8 @@
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
 				  bool pf_lut, u8 *lut, u16 lut_size);
@@ -101,4 +101,6 @@
 				u16 vsi_seid, u16 queue, bool is_add,
 				struct i40e_control_filter_stats *stats,
 				struct i40e_asq_cmd_details *cmd_details);
+void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
+						    u16 vsi_seid);
 #endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 8309793..47e9a90 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -254,8 +254,6 @@
 	    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
 	    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
 		tx_ring->arm_wb = true;
-	else
-		tx_ring->arm_wb = false;
 
 	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 						      tx_ring->queue_index),
@@ -320,6 +318,8 @@
  * i40e_set_new_dynamic_itr - Find new ITR level
  * @rc: structure containing ring performance data
  *
+ * Returns true if ITR changed, false if not
+ *
  * Stores a new ITR value based on packets and byte counts during
  * the last interrupt.  The advantage of per interrupt computation
  * is faster updates and more accurate ITR for the current traffic
@@ -328,21 +328,32 @@
  * testing data as well as attempting to minimize response time
  * while increasing bulk throughput.
  **/
-static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 {
 	enum i40e_latency_range new_latency_range = rc->latency_range;
+	struct i40e_q_vector *qv = rc->ring->q_vector;
 	u32 new_itr = rc->itr;
 	int bytes_per_int;
+	int usecs;
 
 	if (rc->total_packets == 0 || !rc->itr)
-		return;
+		return false;
 
 	/* simple throttlerate management
-	 *   0-10MB/s   lowest (100000 ints/s)
+	 *   0-10MB/s   lowest (50000 ints/s)
 	 *  10-20MB/s   low    (20000 ints/s)
-	 *  20-1249MB/s bulk   (8000 ints/s)
+	 *  20-1249MB/s bulk   (18000 ints/s)
+	 *  > 40000 Rx packets per second (8000 ints/s)
+	 *
+	 * The math works out because the divisor is in 10^(-6), which
+	 * turns the bytes/us input value into MB/s values.  Be sure to
+	 * work in usecs, as the ITR register values are written in
+	 * 2 usec increments, and to use the smoothed values that the
+	 * countdown timer gives us.
 	 */
-	bytes_per_int = rc->total_bytes / rc->itr;
+	usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
+	bytes_per_int = rc->total_bytes / usecs;
+
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
@@ -355,35 +366,52 @@
 			new_latency_range = I40E_LOWEST_LATENCY;
 		break;
 	case I40E_BULK_LATENCY:
-		if (bytes_per_int <= 20)
-			new_latency_range = I40E_LOW_LATENCY;
-		break;
+	case I40E_ULTRA_LATENCY:
 	default:
 		if (bytes_per_int <= 20)
 			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+
+	/* this is to adjust RX more aggressively when streaming small
+	 * packets.  The value of 40000 was picked as it is just beyond
+	 * what the hardware can receive per second if in low latency
+	 * mode.
+	 */
+#define RX_ULTRA_PACKET_RATE 40000
+
+	if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
+	    (&qv->rx == rc))
+		new_latency_range = I40E_ULTRA_LATENCY;
+
 	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
-		new_itr = I40E_ITR_100K;
+		new_itr = I40E_ITR_50K;
 		break;
 	case I40E_LOW_LATENCY:
 		new_itr = I40E_ITR_20K;
 		break;
 	case I40E_BULK_LATENCY:
+		new_itr = I40E_ITR_18K;
+		break;
+	case I40E_ULTRA_LATENCY:
 		new_itr = I40E_ITR_8K;
 		break;
 	default:
 		break;
 	}
 
-	if (new_itr != rc->itr)
-		rc->itr = new_itr;
-
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
+
+	if (new_itr != rc->itr) {
+		rc->itr = new_itr;
+		return true;
+	}
+
+	return false;
 }
 
 /*
@@ -744,16 +772,11 @@
 			     struct sk_buff *skb, u16 vlan_tag)
 {
 	struct i40e_q_vector *q_vector = rx_ring->q_vector;
-	struct i40e_vsi *vsi = rx_ring->vsi;
-	u64 flags = vsi->back->flags;
 
 	if (vlan_tag & VLAN_VID_MASK)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
-	if (flags & I40E_FLAG_IN_NETPOLL)
-		netif_rx(skb);
-	else
-		napi_gro_receive(&q_vector->napi, skb);
+	napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -919,7 +942,7 @@
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	const int current_node = numa_node_id();
+	const int current_node = numa_mem_id();
 	struct i40e_vsi *vsi = rx_ring->vsi;
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
@@ -989,6 +1012,7 @@
 		cleaned_count++;
 		if (rx_hbo || rx_sph) {
 			int len;
+
 			if (rx_hbo)
 				len = I40E_RX_HDR_SIZE;
 			else
@@ -1162,9 +1186,6 @@
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}
 
@@ -1196,6 +1217,21 @@
 	return total_rx_packets;
 }
 
+static u32 i40e_buildreg_itr(const int type, const u16 itr)
+{
+	u32 val;
+
+	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+	      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+	      (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+	return val;
+}
+
+/* a small macro to shorten up some long lines */
+#define INTREG I40E_VFINT_DYN_CTLN1
+
 /**
  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
  * @vsi: the VSI we care about
@@ -1206,55 +1242,67 @@
 					  struct i40e_q_vector *q_vector)
 {
 	struct i40e_hw *hw = &vsi->back->hw;
-	u16 old_itr;
+	bool rx = false, tx = false;
+	u32 rxval, txval;
 	int vector;
-	u32 val;
 
 	vector = (q_vector->v_idx + vsi->base_vector);
+
+	/* avoid the dynamic calculation while in countdown mode, or if
+	 * dynamic ITR is disabled for both Rx and Tx
+	 */
+	rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+
+	if (q_vector->itr_countdown > 0 ||
+	    (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
+	     !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+		goto enable_int;
+	}
+
 	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
-		old_itr = q_vector->rx.itr;
-		i40e_set_new_dynamic_itr(&q_vector->rx);
-		if (old_itr != q_vector->rx.itr) {
-			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-			I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-			(I40E_RX_ITR <<
-				I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-			(q_vector->rx.itr <<
-				I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
-		} else {
-			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-			I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-			(I40E_ITR_NONE <<
-				I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
-		}
-		if (!test_bit(__I40E_DOWN, &vsi->state))
-			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
-	} else {
-		i40evf_irq_enable_queues(vsi->back, 1
-			<< q_vector->v_idx);
+		rx = i40e_set_new_dynamic_itr(&q_vector->rx);
+		rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
 	}
 	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
-		old_itr = q_vector->tx.itr;
-		i40e_set_new_dynamic_itr(&q_vector->tx);
-		if (old_itr != q_vector->tx.itr) {
-			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-				(I40E_TX_ITR <<
-				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
-				(q_vector->tx.itr <<
-				   I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
-
-		} else {
-			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
-				(I40E_ITR_NONE <<
-				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
-		}
-		if (!test_bit(__I40E_DOWN, &vsi->state))
-			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
-	} else {
-		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+		tx = i40e_set_new_dynamic_itr(&q_vector->tx);
+		txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
 	}
+	if (rx || tx) {
+		/* get the higher of the two ITR adjustments and
+		 * use the same value for both ITR registers
+		 * when in adaptive mode (Rx and/or Tx)
+		 */
+		u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
+
+		q_vector->tx.itr = q_vector->rx.itr = itr;
+		txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
+		tx = true;
+		rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
+		rx = true;
+	}
+
+	/* only need to enable the interrupt once, but need
+	 * to possibly update both ITR values
+	 */
+	if (rx) {
+		/* set the INTENA_MSK_MASK so that this first write
+		 * won't actually enable the interrupt, but will instead
+		 * just update the ITR (bit 31 in both the PF and VF
+		 * registers)
+		 */
+		rxval |= BIT(31);
+		/* don't check _DOWN because interrupt isn't being enabled */
+		wr32(hw, INTREG(vector - 1), rxval);
+	}
+
+enable_int:
+	if (!test_bit(__I40E_DOWN, &vsi->state))
+		wr32(hw, INTREG(vector - 1), txval);
+
+	if (q_vector->itr_countdown)
+		q_vector->itr_countdown--;
+	else
+		q_vector->itr_countdown = ITR_COUNTDOWN_START;
+
 }
 
 /**
@@ -1275,7 +1323,7 @@
 	bool clean_complete = true;
 	bool arm_wb = false;
 	int budget_per_ring;
-	int cleaned;
+	int work_done = 0;
 
 	if (test_bit(__I40E_DOWN, &vsi->state)) {
 		napi_complete(napi);
@@ -1288,24 +1336,34 @@
 	i40e_for_each_ring(ring, q_vector->tx) {
 		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
 		arm_wb |= ring->arm_wb;
+		ring->arm_wb = false;
 	}
 
+	/* Handle case where we are called by netpoll with a budget of 0 */
+	if (budget <= 0)
+		goto tx_only;
+
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
 	 * allow the budget to go below 1 because that would exit polling early.
 	 */
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
 
 	i40e_for_each_ring(ring, q_vector->rx) {
+		int cleaned;
+
 		if (ring_is_ps_enabled(ring))
 			cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
 		else
 			cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+
+		work_done += cleaned;
 		/* if we didn't clean as many as budgeted, we must be done */
 		clean_complete &= (budget_per_ring != cleaned);
 	}
 
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete) {
+tx_only:
 		if (arm_wb)
 			i40evf_force_wb(vsi, q_vector);
 		return budget;
@@ -1315,7 +1373,7 @@
 		q_vector->arm_wb_state = false;
 
 	/* Work is done so exit the polling mode and re-enable the interrupt */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	i40e_update_enable_itr(vsi, q_vector);
 	return 0;
 }
@@ -1359,6 +1417,7 @@
 	/* else if it is a SW VLAN, check the next protocol and store the tag */
 	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
+
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
 			return -EINVAL;
@@ -1901,6 +1960,7 @@
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
+
 	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;
 
@@ -1928,10 +1988,11 @@
 	else if (tso)
 		tx_flags |= I40E_TX_FLAGS_TSO;
 
-	if (i40e_chk_linearize(skb, tx_flags))
+	if (i40e_chk_linearize(skb, tx_flags)) {
 		if (skb_linearize(skb))
 			goto out_drop;
-
+		tx_ring->tx_stats.tx_linearize++;
+	}
 	skb_tx_timestamp(skb);
 
 	/* always enable CRC insertion offload */
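
[Editor's note] The ITR-update path rewritten above folds the old per-direction register math into i40e_buildreg_itr() calls, syncs Rx and Tx to the larger (gentler) of the two adaptive intervals, and sets bit 31 so the first register write only latches the new ITR without enabling the interrupt. A minimal userspace sketch of that flow, with simplified stand-in field offsets rather than the real I40E_VFINT_DYN_CTLN1 layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed stand-ins for the DYN_CTLN1 fields, not the real offsets */
#define INTENA          (1u << 0)
#define CLEARPBA        (1u << 1)
#define ITR_INDX_SHIFT  3
#define INTERVAL_SHIFT  5
#define INTENA_MSK      (1u << 31)   /* "update ITR only" flag, bit 31 */

static uint32_t buildreg_itr(uint32_t itr_indx, uint16_t itr)
{
	return INTENA | CLEARPBA |
	       (itr_indx << ITR_INDX_SHIFT) |
	       ((uint32_t)itr << INTERVAL_SHIFT);
}

int main(void)
{
	uint16_t rx_itr = 0x19, tx_itr = 0x3e;   /* 20K and 8K ints/sec */
	bool rx = true, tx = true;               /* both adapted this pass */

	if (rx || tx) {
		/* use the gentler (larger) interval for both directions */
		uint16_t itr = rx_itr > tx_itr ? rx_itr : tx_itr;
		uint32_t rxval = buildreg_itr(0, itr);   /* Rx ITR index */
		uint32_t txval = buildreg_itr(1, itr);   /* Tx ITR index */

		/* first write latches the Rx ITR without enabling */
		printf("write 0x%08x (ITR only)\n",
		       (unsigned)(rxval | INTENA_MSK));
		/* second write latches the Tx ITR and enables the vector */
		printf("write 0x%08x (ITR + enable)\n", (unsigned)txval);
	}
	return 0;
}

Taking the max keeps both directions at the slower rate, so one register write cannot undo the other direction's adaptation.
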
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index d5cb7ac..ebc1bf7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -32,11 +32,14 @@
 #define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
 #define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
 #define I40E_ITR_100K              0x0005
+#define I40E_ITR_50K               0x000A
 #define I40E_ITR_20K               0x0019
+#define I40E_ITR_18K               0x001B
 #define I40E_ITR_8K                0x003E
 #define I40E_ITR_4K                0x007A
-#define I40E_ITR_RX_DEF            I40E_ITR_8K
-#define I40E_ITR_TX_DEF            I40E_ITR_4K
+#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
+#define I40E_ITR_RX_DEF            I40E_ITR_20K
+#define I40E_ITR_TX_DEF            I40E_ITR_20K
 #define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
 #define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
 #define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -44,6 +47,15 @@
 #define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
 #define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
 #define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
+ * the value of the rate limit is non-zero
+ */
+#define INTRL_ENA                  BIT(6)
+#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+#define I40E_INTRL_8K              125     /* 8000 ints/sec */
+#define I40E_INTRL_62K             16      /* 62500 ints/sec */
+#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -79,16 +91,16 @@
 	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 #define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
-		BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
-		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
-		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
-		BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
-		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
-		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
 
 #define i40e_pf_get_default_rss_hena(pf) \
 	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
-		I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
+	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -164,6 +176,7 @@
 	};
 	unsigned int bytecount;
 	unsigned short gso_segs;
+
 	DEFINE_DMA_UNMAP_ADDR(dma);
 	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
@@ -187,6 +200,7 @@
 	u64 restart_queue;
 	u64 tx_busy;
 	u64 tx_done_old;
+	u64 tx_linearize;
 };
 
 struct i40e_rx_queue_stats {
@@ -279,6 +293,7 @@
 	I40E_LOWEST_LATENCY = 0,
 	I40E_LOW_LATENCY = 1,
 	I40E_BULK_LATENCY = 2,
+	I40E_ULTRA_LATENCY = 3,
 };
 
 struct i40e_ring_container {
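
[Editor's note] The new INTRL_* macros above encode the interrupt rate-limit register: 4 usec granularity plus bit 6 as the enable flag. A quick host-side round-trip check (the macro bodies are copied from the hunk; main() is just scaffolding):

#include <stdio.h>

/* macro bodies copied from the hunk above; bit 6 is the enable flag,
 * the register counts in 4 usec units */
#define INTRL_ENA                (1u << 6)
#define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set)   ((set) ? (((set) >> 2) | INTRL_ENA) : 0)

int main(void)
{
	unsigned int usec = 125;   /* I40E_INTRL_8K: ~8000 interrupts/sec */
	unsigned int reg = INTRL_USEC_TO_REG(usec);

	/* prints "125 usec -> reg 0x5f -> 124 usec": the 4 usec
	 * granularity rounds the odd microsecond away */
	printf("%u usec -> reg 0x%02x -> %u usec\n",
	       usec, reg, INTRL_REG_TO_USEC(reg));
	return 0;
}
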
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ed71666..301fe2b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -33,29 +33,7 @@
 #include "i40e_adminq.h"
 #include "i40e_hmc.h"
 #include "i40e_lan_hmc.h"
-
-/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710		0x1572
-#define I40E_DEV_ID_QEMU		0x1574
-#define I40E_DEV_ID_KX_A		0x157F
-#define I40E_DEV_ID_KX_B		0x1580
-#define I40E_DEV_ID_KX_C		0x1581
-#define I40E_DEV_ID_QSFP_A		0x1583
-#define I40E_DEV_ID_QSFP_B		0x1584
-#define I40E_DEV_ID_QSFP_C		0x1585
-#define I40E_DEV_ID_10G_BASE_T		0x1586
-#define I40E_DEV_ID_20G_KR2		0x1587
-#define I40E_DEV_ID_VF			0x154C
-#define I40E_DEV_ID_VF_HV		0x1571
-#define I40E_DEV_ID_SFP_X722		0x37D0
-#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
-#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
-#define I40E_DEV_ID_X722_VF		0x37CD
-#define I40E_DEV_ID_X722_VF_HV		0x37D9
-
-#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
-					 (d) == I40E_DEV_ID_QSFP_B  || \
-					 (d) == I40E_DEV_ID_QSFP_C)
+#include "i40e_devids.h"
 
 /* I40E_MASK is a macro used on 32 bit registers */
 #define I40E_MASK(mask, shift) (mask << shift)
@@ -158,14 +136,14 @@
 };
 
 enum i40e_vsi_type {
-	I40E_VSI_MAIN = 0,
-	I40E_VSI_VMDQ1,
-	I40E_VSI_VMDQ2,
-	I40E_VSI_CTRL,
-	I40E_VSI_FCOE,
-	I40E_VSI_MIRROR,
-	I40E_VSI_SRIOV,
-	I40E_VSI_FDIR,
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
 	I40E_VSI_TYPE_UNKNOWN
 };
 
@@ -189,16 +167,65 @@
 	bool crc_enable;
 	u8 pacing;
 	u8 requested_speeds;
+	u8 module_type[3];
+	/* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP		0x03
+#define I40E_MODULE_TYPE_QSFP		0x0D
+	/* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE	0x01
+#define I40E_MODULE_TYPE_40G_LR4	0x02
+#define I40E_MODULE_TYPE_40G_SR4	0x04
+#define I40E_MODULE_TYPE_40G_CR4	0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR	0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR	0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM	0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER	0x80
+	/* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX	0x01
+#define I40E_MODULE_TYPE_1000BASE_LX	0x02
+#define I40E_MODULE_TYPE_1000BASE_CX	0x04
+#define I40E_MODULE_TYPE_1000BASE_T	0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+	I40E_CAP_PHY_TYPE_SGMII		  = BIT(I40E_PHY_TYPE_SGMII),
+	I40E_CAP_PHY_TYPE_1000BASE_KX	  = BIT(I40E_PHY_TYPE_1000BASE_KX),
+	I40E_CAP_PHY_TYPE_10GBASE_KX4	  = BIT(I40E_PHY_TYPE_10GBASE_KX4),
+	I40E_CAP_PHY_TYPE_10GBASE_KR	  = BIT(I40E_PHY_TYPE_10GBASE_KR),
+	I40E_CAP_PHY_TYPE_40GBASE_KR4	  = BIT(I40E_PHY_TYPE_40GBASE_KR4),
+	I40E_CAP_PHY_TYPE_XAUI		  = BIT(I40E_PHY_TYPE_XAUI),
+	I40E_CAP_PHY_TYPE_XFI		  = BIT(I40E_PHY_TYPE_XFI),
+	I40E_CAP_PHY_TYPE_SFI		  = BIT(I40E_PHY_TYPE_SFI),
+	I40E_CAP_PHY_TYPE_XLAUI		  = BIT(I40E_PHY_TYPE_XLAUI),
+	I40E_CAP_PHY_TYPE_XLPPI		  = BIT(I40E_PHY_TYPE_XLPPI),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4_CU  = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1_CU  = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_AOC	  = BIT(I40E_PHY_TYPE_10GBASE_AOC),
+	I40E_CAP_PHY_TYPE_40GBASE_AOC	  = BIT(I40E_PHY_TYPE_40GBASE_AOC),
+	I40E_CAP_PHY_TYPE_100BASE_TX	  = BIT(I40E_PHY_TYPE_100BASE_TX),
+	I40E_CAP_PHY_TYPE_1000BASE_T	  = BIT(I40E_PHY_TYPE_1000BASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_T	  = BIT(I40E_PHY_TYPE_10GBASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_SR	  = BIT(I40E_PHY_TYPE_10GBASE_SR),
+	I40E_CAP_PHY_TYPE_10GBASE_LR	  = BIT(I40E_PHY_TYPE_10GBASE_LR),
+	I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1	  = BIT(I40E_PHY_TYPE_10GBASE_CR1),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4	  = BIT(I40E_PHY_TYPE_40GBASE_CR4),
+	I40E_CAP_PHY_TYPE_40GBASE_SR4	  = BIT(I40E_PHY_TYPE_40GBASE_SR4),
+	I40E_CAP_PHY_TYPE_40GBASE_LR4	  = BIT(I40E_PHY_TYPE_40GBASE_LR4),
+	I40E_CAP_PHY_TYPE_1000BASE_SX	  = BIT(I40E_PHY_TYPE_1000BASE_SX),
+	I40E_CAP_PHY_TYPE_1000BASE_LX	  = BIT(I40E_PHY_TYPE_1000BASE_LX),
+	I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL =
+					 BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+	I40E_CAP_PHY_TYPE_20GBASE_KR2	  = BIT(I40E_PHY_TYPE_20GBASE_KR2)
 };
 
 struct i40e_phy_info {
 	struct i40e_link_status link_info;
 	struct i40e_link_status link_info_old;
-	u32 autoneg_advertised;
-	u32 phy_id;
-	u32 module_type;
 	bool get_link_info;
 	enum i40e_media_type media_type;
+	/* all the phy types the NVM is capable of */
+	enum i40e_aq_capabilities_phy_type phy_types;
 };
 
 #define I40E_HW_CAP_MAX_GPIO			30
@@ -286,6 +313,7 @@
 	bool blank_nvm_mode;      /* is NVM empty (no FW present)*/
 	u16 version;              /* NVM package version */
 	u32 eetrack;              /* NVM data version */
+	u32 oem_ver;              /* OEM version info */
 };
 
 /* definitions used in NVM update support */
@@ -502,8 +530,9 @@
 	u16 dcbx_status;
 
 	/* DCBX info */
-	struct i40e_dcbx_config local_dcbx_config;
-	struct i40e_dcbx_config remote_dcbx_config;
+	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
 
 	/* debug mask */
 	u32 debug_mask;
@@ -1026,8 +1055,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
-				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1170,6 +1199,7 @@
 /* Checksum and Shadow RAM pointers */
 #define I40E_SR_NVM_CONTROL_WORD		0x00
 #define I40E_SR_EMP_MODULE_PTR			0x0F
+#define I40E_NVM_OEM_VER_OFF			0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION		0x18
 #define I40E_SR_NVM_WAKE_ON_LAN			0x19
 #define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index e6db20e..9f7b279 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -81,7 +81,6 @@
 	I40E_VIRTCHNL_OP_GET_STATS = 15,
 	I40E_VIRTCHNL_OP_FCOE = 16,
 	I40E_VIRTCHNL_OP_EVENT = 17,
-	I40E_VIRTCHNL_OP_CONFIG_RSS = 18,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -151,6 +150,7 @@
 #define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR	0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 3817cbb..22fc3d4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -48,10 +48,6 @@
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 #define PFX "i40evf: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
-	((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
-	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
-		__func__ , ## args)))
 
 /* dummy struct to make common code less painful */
 struct i40e_vsi {
@@ -70,6 +66,7 @@
 	 */
 	u16 rx_itr_setting;
 	u16 tx_itr_setting;
+	u16 qs_handle;
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -90,7 +87,7 @@
 #define I40EVF_MAX_RXBUFFER   16384  /* largest size for single descriptor */
 #define I40EVF_MAX_AQ_BUF_SIZE    4096
 #define I40EVF_AQ_LEN             32
-#define I40EVF_AQ_MAX_ERR         10 /* times to try before resetting AQ */
+#define I40EVF_AQ_MAX_ERR         20 /* times to try before resetting AQ */
 
 #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
 
@@ -115,6 +112,8 @@
 	struct i40e_ring_container tx;
 	u32 ring_mask;
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
+#define ITR_COUNTDOWN_START 100
+	u8 itr_countdown;	/* when 0 or 1 update ITR */
 	int v_idx;	  /* vector index in list */
 	char name[IFNAMSIZ + 9];
 	bool arm_wb_state;
@@ -214,7 +213,6 @@
 #define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
 #define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
 #define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
-#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
 #define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
 #define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
@@ -223,10 +221,10 @@
 #define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
 #define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
 #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
+#define I40EVF_FLAG_ADDR_SET_BY_PF		BIT(13)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
-#define I40E_FLAG_IN_NETPOLL			 I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
 #define I40E_FLAG_WB_ON_ITR_CAPABLE		I40EVF_FLAG_WB_ON_ITR_CAPABLE
 #define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5fc8204..d962164 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,7 +34,7 @@
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.3.13"
+#define DRV_VERSION "1.3.33"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
 	"Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -282,6 +282,7 @@
 /**
  * i40evf_irq_enable - Enable default interrupt generation settings
  * @adapter: board private structure
+ * @flush: whether to flush pending register writes with an rd32()
  **/
 void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
 {
@@ -305,15 +306,14 @@
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	struct i40e_hw *hw = &adapter->hw;
 	u32 val;
-	u32 ena_mask;
 
 	/* handle non-queue interrupts */
-	val = rd32(hw, I40E_VFINT_ICR01);
-	ena_mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+	rd32(hw, I40E_VFINT_ICR01);
+	rd32(hw, I40E_VFINT_ICR0_ENA1);
 
 
-	val = rd32(hw, I40E_VFINT_DYN_CTL01);
-	val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
+	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
+	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
 	wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
 	/* schedule work on the private workqueue */
@@ -334,7 +334,7 @@
 	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
-	napi_schedule(&q_vector->napi);
+	napi_schedule_irqoff(&q_vector->napi);
 
 	return IRQ_HANDLED;
 }
@@ -357,6 +357,7 @@
 	q_vector->rx.ring = rx_ring;
 	q_vector->rx.count++;
 	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->itr_countdown = ITR_COUNTDOWN_START;
 }
 
 /**
@@ -377,6 +378,7 @@
 	q_vector->tx.ring = tx_ring;
 	q_vector->tx.count++;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+	q_vector->itr_countdown = ITR_COUNTDOWN_START;
 	q_vector->num_ringpairs++;
 	q_vector->ring_mask |= BIT(t_idx);
 }
@@ -444,6 +446,29 @@
 	return err;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40evf_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts.  It's not called while the normal interrupt routine is executing.
+ **/
+static void i40evf_netpoll(struct net_device *netdev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(netdev);
+	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
+	int i;
+
+	/* if interface is down do nothing */
+	if (test_bit(__I40E_DOWN, &adapter->vsi.state))
+		return;
+
+	for (i = 0; i < q_vectors; i++)
+		i40evf_msix_clean_rings(0, adapter->q_vector[i]);
+}
+
+#endif
 /**
  * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
  * @adapter: board private structure
@@ -489,8 +514,7 @@
 			q_vector);
 		if (err) {
 			dev_info(&adapter->pdev->dev,
-				 "%s: request_irq failed, error: %d\n",
-				__func__, err);
+				 "request_irq failed, error: %d\n", err);
 			goto free_queue_irqs;
 		}
 		/* assign the mask for this irq */
@@ -731,6 +755,8 @@
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+	if (!VLAN_ALLOWED(adapter))
+		return -EIO;
 	if (i40evf_add_vlan(adapter, vid) == NULL)
 		return -ENOMEM;
 	return 0;
@@ -746,8 +772,11 @@
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-	i40evf_del_vlan(adapter, vid);
-	return 0;
+	if (VLAN_ALLOWED(adapter)) {
+		i40evf_del_vlan(adapter, vid);
+		return 0;
+	}
+	return -EIO;
 }
 
 /**
@@ -837,6 +866,15 @@
 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
 		return 0;
 
+	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
+		return -EPERM;
+
+	f = i40evf_find_filter(adapter, hw->mac.addr);
+	if (f) {
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+	}
+
 	f = i40evf_add_filter(adapter, addr->sa_data);
 	if (f) {
 		ether_addr_copy(hw->mac.addr, addr->sa_data);
@@ -856,6 +894,7 @@
 	struct i40evf_mac_filter *f, *ftmp;
 	struct netdev_hw_addr *uca;
 	struct netdev_hw_addr *mca;
+	struct netdev_hw_addr *ha;
 	int count = 50;
 
 	/* add addr if not already in the filter list */
@@ -877,29 +916,27 @@
 	}
 	/* remove filter if not in netdev list */
 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
-		bool found = false;
+		netdev_for_each_mc_addr(mca, netdev)
+			if (ether_addr_equal(mca->addr, f->macaddr))
+				goto bottom_of_search_loop;
 
-		if (is_multicast_ether_addr(f->macaddr)) {
-			netdev_for_each_mc_addr(mca, netdev) {
-				if (ether_addr_equal(mca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-		} else {
-			netdev_for_each_uc_addr(uca, netdev) {
-				if (ether_addr_equal(uca->addr, f->macaddr)) {
-					found = true;
-					break;
-				}
-			}
-			if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
-				found = true;
-		}
-		if (!found) {
-			f->remove = true;
-			adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-		}
+		netdev_for_each_uc_addr(uca, netdev)
+			if (ether_addr_equal(uca->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		for_each_dev_addr(netdev, ha)
+			if (ether_addr_equal(ha->addr, f->macaddr))
+				goto bottom_of_search_loop;
+
+		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+			goto bottom_of_search_loop;
+
+		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+		f->remove = true;
+		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+
+bottom_of_search_loop:
+		continue;
 	}
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 }
@@ -1111,6 +1148,8 @@
 		tx_ring->netdev = adapter->netdev;
 		tx_ring->dev = &adapter->pdev->dev;
 		tx_ring->count = adapter->tx_desc_count;
+		if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
 		adapter->tx_rings[i] = tx_ring;
 
 		rx_ring = &tx_ring[1];
@@ -1165,7 +1204,7 @@
 	for (vector = 0; vector < v_budget; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
-	i40evf_acquire_msix_vectors(adapter, v_budget);
+	err = i40evf_acquire_msix_vectors(adapter, v_budget);
 
 out:
 	adapter->netdev->real_num_tx_queues = pairs;
@@ -1421,16 +1460,16 @@
 						      struct i40evf_adapter,
 						      watchdog_task);
 	struct i40e_hw *hw = &adapter->hw;
-	uint32_t rstat_val;
+	u32 reg_val;
 
 	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
 		goto restart_watchdog;
 
 	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val == I40E_VFR_VFACTIVE) ||
-		    (rstat_val == I40E_VFR_COMPLETED)) {
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if ((reg_val == I40E_VFR_VFACTIVE) ||
+		    (reg_val == I40E_VFR_COMPLETED)) {
 			/* A chance for redemption! */
 			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
 			adapter->state = __I40EVF_STARTUP;
@@ -1455,11 +1494,8 @@
 		goto watchdog_done;
 
 	/* check for reset */
-	rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
-	    (rstat_val != I40E_VFR_VFACTIVE) &&
-	    (rstat_val != I40E_VFR_COMPLETED)) {
+	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
+	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
 		adapter->state = __I40EVF_RESETTING;
 		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1573,8 +1609,9 @@
 						      reset_task);
 	struct net_device *netdev = adapter->netdev;
 	struct i40e_hw *hw = &adapter->hw;
+	struct i40evf_vlan_filter *vlf;
 	struct i40evf_mac_filter *f;
-	uint32_t rstat_val;
+	u32 reg_val;
 	int i = 0, err;
 
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1595,12 +1632,11 @@
 
 	/* poll until we see the reset actually happen */
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val != I40E_VFR_VFACTIVE) &&
-		    (rstat_val != I40E_VFR_COMPLETED))
+		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
+			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
+		if (!reg_val)
 			break;
-		usleep_range(500, 1000);
+		usleep_range(5000, 10000);
 	}
 	if (i == I40EVF_RESET_WAIT_COUNT) {
 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
@@ -1609,21 +1645,21 @@
 
 	/* wait until the reset is complete and the PF is responding to us */
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
-		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
-			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (rstat_val == I40E_VFR_VFACTIVE)
+		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (reg_val == I40E_VFR_VFACTIVE)
 			break;
 		msleep(I40EVF_RESET_WAIT_MS);
 	}
 	/* extra wait to make sure minimum wait is met */
 	msleep(I40EVF_RESET_WAIT_MS);
 	if (i == I40EVF_RESET_WAIT_COUNT) {
-		struct i40evf_mac_filter *f, *ftmp;
+		struct i40evf_mac_filter *ftmp;
 		struct i40evf_vlan_filter *fv, *fvtmp;
 
 		/* reset never finished */
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
-			rstat_val);
+			reg_val);
 		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 
 		if (netif_running(adapter->netdev)) {
@@ -1697,8 +1733,8 @@
 		f->add = true;
 	}
 	/* re-add all VLAN filters */
-	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
-		f->add = true;
+	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+		vlf->add = true;
 	}
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -1853,8 +1889,7 @@
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
-			"%s: Allocation for Tx Queue %u failed\n",
-			__func__, i);
+			"Allocation for Tx Queue %u failed\n", i);
 		break;
 	}
 
@@ -1881,8 +1916,7 @@
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
-			"%s: Allocation for Rx Queue %u failed\n",
-			__func__, i);
+			"Allocation for Rx Queue %u failed\n", i);
 		break;
 	}
 	return err;
@@ -2041,6 +2075,9 @@
 	.ndo_tx_timeout		= i40evf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= i40evf_netpoll,
+#endif
 };
 
 /**
@@ -2089,7 +2126,10 @@
 
 	if (adapter->vf_res->vf_offload_flags
 	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-		netdev->vlan_features = netdev->features;
+		netdev->vlan_features = netdev->features &
+					~(NETIF_F_HW_VLAN_CTAG_TX |
+					  NETIF_F_HW_VLAN_CTAG_RX |
+					  NETIF_F_HW_VLAN_CTAG_FILTER);
 		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_HW_VLAN_CTAG_RX |
 				    NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -2118,6 +2158,7 @@
 	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
 				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
 	adapter->vsi.netdev = adapter->netdev;
+	adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
 	return 0;
 }
 
@@ -2246,10 +2287,13 @@
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
 			 adapter->hw.mac.addr);
-		random_ether_addr(adapter->hw.mac.addr);
+		eth_hw_addr_random(netdev);
+		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+	} else {
+		adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
+		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 	}
-	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
 	init_timer(&adapter->watchdog_timer);
 	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
@@ -2265,6 +2309,9 @@
 	if (err)
 		goto err_sw_init;
 	i40evf_map_rings_to_vectors(adapter);
+	if (adapter->vf_res->vf_offload_flags &
+		    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
 	if (!RSS_AQ(adapter))
 		i40evf_configure_rss(adapter);
 	err = i40evf_request_misc_irq(adapter);
@@ -2300,8 +2347,7 @@
 	}
 	return;
 restart:
-	schedule_delayed_work(&adapter->init_task,
-			      msecs_to_jiffies(50));
+	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
 	return;
 
 err_register:
@@ -2314,11 +2360,14 @@
 err:
 	/* Things went into the weeds, so try again later */
 	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
-		dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
+		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
 		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
-		return; /* do not reschedule */
+		i40evf_shutdown_adminq(hw);
+		adapter->state = __I40EVF_STARTUP;
+		schedule_delayed_work(&adapter->init_task, HZ * 5);
+		return;
 	}
-	schedule_delayed_work(&adapter->init_task, HZ * 3);
+	schedule_delayed_work(&adapter->init_task, HZ);
 }
 
 /**
@@ -2434,7 +2483,8 @@
 	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
 	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
 	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
-	schedule_delayed_work(&adapter->init_task, 10);
+	schedule_delayed_work(&adapter->init_task,
+			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
 
 	return 0;
 
@@ -2510,6 +2560,7 @@
 	rtnl_lock();
 	err = i40evf_set_interrupt_capability(adapter);
 	if (err) {
+		rtnl_unlock();
 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
 		return err;
 	}
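
[Editor's note] The Rx-mode filter cleanup earlier in this file (the bottom_of_search_loop hunk) trades the old found flag for a labelled continue: each programmed filter is checked against several address lists, and the first hit jumps straight to the bottom of the outer loop. A small standalone C model of that control flow, using toy string lists in place of the netdev address lists:

#include <stdio.h>
#include <string.h>

/* toy stand-ins for the netdev unicast/multicast address lists */
static const char *uc[] = { "aa", "bb" };
static const char *mc[] = { "cc" };
/* filters currently programmed into the adapter */
static const char *filters[] = { "aa", "cc", "dd" };

#define LEN(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	for (size_t f = 0; f < LEN(filters); f++) {
		for (size_t i = 0; i < LEN(uc); i++)
			if (!strcmp(uc[i], filters[f]))
				goto bottom_of_search_loop;

		for (size_t i = 0; i < LEN(mc); i++)
			if (!strcmp(mc[i], filters[f]))
				goto bottom_of_search_loop;

		/* not found in any list, so mark it for removal */
		printf("remove filter %s\n", filters[f]);

bottom_of_search_loop:
		continue;
	}
	return 0;
}

Compared with the flag version, the gotos keep every membership test flat, and the fall-through "not found" path is the only one that reaches the removal code.
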
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index d4eb1a5..32e620e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -156,7 +156,8 @@
 	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
 	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
-	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+	       I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
 	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
 	if (PF_IS_V11(adapter))
@@ -234,8 +235,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
@@ -288,8 +289,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
@@ -313,8 +314,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
@@ -341,8 +342,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -393,8 +394,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -410,8 +411,7 @@
 	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -453,8 +453,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -470,8 +470,7 @@
 	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
 	      (count * sizeof(struct i40e_virtchnl_ether_addr));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
 			sizeof(struct i40e_virtchnl_ether_addr);
@@ -513,8 +512,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 
@@ -531,8 +530,7 @@
 	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -572,8 +570,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 
@@ -590,8 +588,7 @@
 	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
 	      (count * sizeof(u16));
 	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
-		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
-			 __func__);
+		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
 		count = (I40EVF_MAX_AQ_BUF_SIZE -
 			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
 			sizeof(u16);
@@ -629,8 +626,8 @@
 
 	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
 		/* bail because we already have a command pending */
-		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
-			__func__, adapter->current_op);
+		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
+			adapter->current_op);
 		return;
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -720,17 +717,16 @@
 			}
 			break;
 		default:
-			dev_err(&adapter->pdev->dev,
-				"%s: Unknown event %d from pf\n",
-				__func__, vpe->event);
+			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
+				vpe->event);
 			break;
 		}
 		return;
 	}
 	if (v_retval) {
-		dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
-			__func__, v_retval,
-			i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
+		dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
+			v_retval, i40evf_stat_str(&adapter->hw, v_retval),
+			v_opcode);
 	}
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -756,6 +752,8 @@
 			  sizeof(struct i40e_virtchnl_vsi_resource);
 		memcpy(adapter->vf_res, msg, min(msglen, len));
 		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		/* restore current mac address */
+		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
 		i40evf_process_config(adapter);
 		}
 		break;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 212d668..1a2f1cc 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -444,8 +444,8 @@
 
 	struct ptp_pin_desc sdp_config[IGB_N_SDP];
 	struct {
-		struct timespec start;
-		struct timespec period;
+		struct timespec64 start;
+		struct timespec64 period;
 	} perout[IGB_N_PEROUT];
 
 	char fw_version[32];
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426276..2529bc6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -842,10 +842,6 @@
 		sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = IGB_STATS_LEN;
-	drvinfo->testinfo_len = IGB_TEST_LEN;
-	drvinfo->regdump_len = igb_get_regs_len(netdev);
-	drvinfo->eedump_len = igb_get_eeprom_len(netdev);
 }
 
 static void igb_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ba019fc..ea7b098 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -151,7 +151,7 @@
 #endif /* CONFIG_IGB_DCA */
 static int igb_poll(struct napi_struct *, int);
 static bool igb_clean_tx_irq(struct igb_q_vector *);
-static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
 static void igb_reset_task(struct work_struct *);
@@ -5392,7 +5392,7 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct ptp_clock_event event;
-	struct timespec ts;
+	struct timespec64 ts;
 	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
 
 	if (tsicr & TSINTR_SYS_WRAP) {
@@ -5412,10 +5412,11 @@
 
 	if (tsicr & TSINTR_TT0) {
 		spin_lock(&adapter->tmreg_lock);
-		ts = timespec_add(adapter->perout[0].start,
-				  adapter->perout[0].period);
+		ts = timespec64_add(adapter->perout[0].start,
+				    adapter->perout[0].period);
+		/* u32 conversion of tv_sec is safe until y2106 */
 		wr32(E1000_TRGTTIML0, ts.tv_nsec);
-		wr32(E1000_TRGTTIMH0, ts.tv_sec);
+		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsauxc |= TSAUXC_EN_TT0;
 		wr32(E1000_TSAUXC, tsauxc);
@@ -5426,10 +5427,10 @@
 
 	if (tsicr & TSINTR_TT1) {
 		spin_lock(&adapter->tmreg_lock);
-		ts = timespec_add(adapter->perout[1].start,
-				  adapter->perout[1].period);
+		ts = timespec64_add(adapter->perout[1].start,
+				    adapter->perout[1].period);
 		wr32(E1000_TRGTTIML1, ts.tv_nsec);
-		wr32(E1000_TRGTTIMH1, ts.tv_sec);
+		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsauxc |= TSAUXC_EN_TT1;
 		wr32(E1000_TSAUXC, tsauxc);
@@ -6363,6 +6364,7 @@
 						     struct igb_q_vector,
 						     napi);
 	bool clean_complete = true;
+	int work_done = 0;
 
 #ifdef CONFIG_IGB_DCA
 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
@@ -6371,15 +6373,19 @@
 	if (q_vector->tx.ring)
 		clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (q_vector->rx.ring)
-		clean_complete &= igb_clean_rx_irq(q_vector, budget);
+	if (q_vector->rx.ring) {
+		int cleaned = igb_clean_rx_irq(q_vector, budget);
+
+		work_done += cleaned;
+		clean_complete &= (cleaned < budget);
+	}
 
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
 
 	/* If not enough Rx work done, exit the polling mode */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	igb_ring_irq_enable(q_vector);
 
 	return 0;
@@ -6903,7 +6909,7 @@
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
 	struct igb_ring *rx_ring = q_vector->rx.ring;
 	struct sk_buff *skb = rx_ring->skb;
@@ -6977,7 +6983,7 @@
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
 
-	return total_packets < budget;
+	return total_packets;
 }
 
 static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
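
[Editor's note] Several drivers in this series (i40evf, igb, igbvf, ixgbe, ixgbevf) make the same conversion: the Rx cleaner now returns a packet count instead of a boolean, the poll routine accumulates it, and napi_complete_done() reports it so the core can tune busy polling. A stripped-down model of the completion decision, where clean_rx() and the printf stand in for the real cleaner and napi_complete_done():

#include <stdbool.h>
#include <stdio.h>

/* pretend Rx cleaner: returns packets processed, at most 'budget' */
static int clean_rx(int budget)
{
	int backlog = 37;   /* packets waiting, say */

	return backlog < budget ? backlog : budget;
}

static int poll(int budget)
{
	int work_done = clean_rx(budget);
	bool clean_complete = work_done < budget;

	if (!clean_complete)
		return budget;   /* budget exhausted: stay in polling mode */

	/* napi_complete_done(napi, work_done) would go here */
	printf("done, reporting %d packets\n", work_done);
	return 0;
}

int main(void)
{
	poll(64);   /* backlog smaller than budget: completes */
	poll(16);   /* budget exhausted: keeps polling */
	return 0;
}
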
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5982f28..c44df87 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -143,7 +143,7 @@
 	 * sub-nanosecond resolution.
 	 */
 	wr32(E1000_SYSTIML, ts->tv_nsec);
-	wr32(E1000_SYSTIMH, ts->tv_sec);
+	wr32(E1000_SYSTIMH, (u32)ts->tv_sec);
 }
 
 /**
@@ -479,7 +479,7 @@
 	struct e1000_hw *hw = &igb->hw;
 	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
 	unsigned long flags;
-	struct timespec ts;
+	struct timespec64 ts;
 	int use_freq = 0, pin = -1;
 	s64 ns;
 
@@ -523,14 +523,14 @@
 		}
 		ts.tv_sec = rq->perout.period.sec;
 		ts.tv_nsec = rq->perout.period.nsec;
-		ns = timespec_to_ns(&ts);
+		ns = timespec64_to_ns(&ts);
 		ns = ns >> 1;
 		if (on && ns <= 70000000LL) {
 			if (ns < 8LL)
 				return -EINVAL;
 			use_freq = 1;
 		}
-		ts = ns_to_timespec(ns);
+		ts = ns_to_timespec64(ns);
 		if (rq->perout.index == 1) {
 			if (use_freq) {
 				tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
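
[Editor's note] The igb PTP hunks swap struct timespec for timespec64 so the perout arithmetic survives 2038; the explicit (u32) casts when writing the TRGTTIMH/SYSTIMH registers then document that the register itself only wraps in 2106. A userspace sketch with a local 64-bit timespec stand-in (ts64 and ts64_add are illustrative, not the kernel's helpers):

#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's struct timespec64 */
struct ts64 {
	int64_t tv_sec;
	long tv_nsec;
};

static struct ts64 ts64_add(struct ts64 a, struct ts64 b)
{
	struct ts64 r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

	if (r.tv_nsec >= 1000000000L) {
		r.tv_nsec -= 1000000000L;
		r.tv_sec++;
	}
	return r;
}

int main(void)
{
	struct ts64 start = { 2147483640, 0 };   /* just before 2038 */
	struct ts64 period = { 10, 0 };
	struct ts64 next = ts64_add(start, period);

	/* 64-bit seconds survive the 2038 rollover; the u32 register
	 * truncation only wraps in 2106 */
	printf("next = %lld (reg writes 0x%08x)\n",
	       (long long)next.tv_sec, (unsigned)(uint32_t)next.tv_sec);
	return 0;
}
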
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index c6996fe..b74ce53 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -196,8 +196,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = igbvf_get_regs_len(netdev);
-	drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
 }
 
 static void igbvf_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index e86d41e..297af80 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1211,7 +1211,7 @@
 
 	/* If not enough Rx work done, exit the polling mode */
 	if (work_done < budget) {
-		napi_complete(napi);
+		napi_complete_done(napi, work_done);
 
 		if (adapter->requested_itr & 3)
 			igbvf_set_itr(adapter);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index b311e9e..d2b29b4 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -479,9 +479,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = IXGB_STATS_LEN;
-	drvinfo->regdump_len = ixgb_get_regs_len(netdev);
-	drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
 }
 
 static void
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dda0f67..1d21745 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -152,9 +152,17 @@
 	u16 vlan_count;
 	u8 spoofchk_enabled;
 	bool rss_query_enabled;
+	u8 trusted;
+	int xcast_mode;
 	unsigned int vf_api;
 };
 
+enum ixgbevf_xcast_modes {
+	IXGBEVF_XCAST_MODE_NONE = 0,
+	IXGBEVF_XCAST_MODE_MULTI,
+	IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 struct vf_macvlans {
 	struct list_head l;
 	int vf;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 94c4912..d681273 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -943,9 +943,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = IXGBE_STATS_LEN;
-	drvinfo->testinfo_len = IXGBE_TEST_LEN;
-	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1910039..47395ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2775,7 +2775,7 @@
 				container_of(napi, struct ixgbe_q_vector, napi);
 	struct ixgbe_adapter *adapter = q_vector->adapter;
 	struct ixgbe_ring *ring;
-	int per_ring_budget;
+	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
 #ifdef CONFIG_IXGBE_DCA
@@ -2796,9 +2796,13 @@
 	else
 		per_ring_budget = budget;
 
-	ixgbe_for_each_ring(ring, q_vector->rx)
-		clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring,
-				   per_ring_budget) < per_ring_budget);
+	ixgbe_for_each_ring(ring, q_vector->rx) {
+		int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
+						 per_ring_budget);
+
+		work_done += cleaned;
+		clean_complete &= (cleaned < per_ring_budget);
+	}
 
 	ixgbe_qv_unlock_napi(q_vector);
 	/* If all work not completed, return budget and keep polling */
@@ -2806,7 +2810,7 @@
 		return budget;
 
 	/* all work done, exit the polling mode */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	if (adapter->rx_itr_setting & 1)
 		ixgbe_set_itr(q_vector);
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -3723,14 +3727,20 @@
 	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
 					  adapter->num_vfs);
 
-	/* Ensure LLDP is set for Ethertype Antispoofing if we will be
+	/* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
 	 * calling set_ethertype_anti_spoofing for each VF in loop below
 	 */
-	if (hw->mac.ops.set_ethertype_anti_spoofing)
+	if (hw->mac.ops.set_ethertype_anti_spoofing) {
 		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
-				(IXGBE_ETQF_FILTER_EN    | /* enable filter */
-				 IXGBE_ETQF_TX_ANTISPOOF | /* tx antispoof */
-				 IXGBE_ETH_P_LLDP));	   /* LLDP eth type */
+				(IXGBE_ETQF_FILTER_EN    |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 IXGBE_ETH_P_LLDP));
+
+		IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+				(IXGBE_ETQF_FILTER_EN |
+				 IXGBE_ETQF_TX_ANTISPOOF |
+				 ETH_P_PAUSE));
+	}
 
 	/* For VFs that have spoof checking turned off */
 	for (i = 0; i < adapter->num_vfs; i++) {
@@ -5301,7 +5311,6 @@
 	rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
 	adapter->ring_feature[RING_F_RSS].limit = rss;
 	adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
-	adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 	adapter->max_q_vectors = MAX_Q_VECTORS_82599;
 	adapter->atr_sample_rate = 20;
 	fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
@@ -5327,7 +5336,6 @@
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
 		adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
-		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
 
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
@@ -8399,6 +8407,7 @@
 	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
 	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
 	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
+	.ndo_set_vf_trust	= ixgbe_ndo_set_vf_trust,
 	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
 	.ndo_get_stats64	= ixgbe_get_stats64,
 #ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b1e4703..8daa95f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -102,6 +102,8 @@
 #define IXGBE_VF_GET_RETA	0x0a	/* VF request for RETA */
 #define IXGBE_VF_GET_RSS_KEY	0x0b	/* get RSS key */
 
+#define IXGBE_VF_UPDATE_XCAST_MODE	0x0c
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
 /* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1d17b58..fcd8b27 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -116,6 +116,12 @@
 			 * we want to disable the querying by default.
 			 */
 			adapter->vfinfo[i].rss_query_enabled = 0;
+
+			/* Untrust all VFs */
+			adapter->vfinfo[i].trusted = false;
+
+			/* set the default xcast mode */
+			adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE;
 		}
 
 		return 0;
@@ -1001,6 +1007,59 @@
 	return 0;
 }
 
+static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
+				      u32 *msgbuf, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int xcast_mode = msgbuf[1];
+	u32 vmolr, disable, enable;
+
+	/* verify the PF is supporting the correct APIs */
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_12:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI &&
+	    !adapter->vfinfo[vf].trusted) {
+		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+	}
+
+	if (adapter->vfinfo[vf].xcast_mode == xcast_mode)
+		goto out;
+
+	switch (xcast_mode) {
+	case IXGBEVF_XCAST_MODE_NONE:
+		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+		enable = 0;
+		break;
+	case IXGBEVF_XCAST_MODE_MULTI:
+		disable = IXGBE_VMOLR_MPE;
+		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+		break;
+	case IXGBEVF_XCAST_MODE_ALLMULTI:
+		disable = 0;
+		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+	vmolr &= ~disable;
+	vmolr |= enable;
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+	adapter->vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+	msgbuf[1] = xcast_mode;
+
+	return 0;
+}
+
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
 	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1063,6 +1122,9 @@
 	case IXGBE_VF_GET_RSS_KEY:
 		retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf);
 		break;
+	case IXGBE_VF_UPDATE_XCAST_MODE:
+		retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
+		break;
 	default:
 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
@@ -1124,6 +1186,17 @@
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
 }
 
+static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 ping;
+
+	ping = IXGBE_PF_CONTROL_MSG;
+	if (adapter->vfinfo[vf].clear_to_send)
+		ping |= IXGBE_VT_MSGTYPE_CTS;
+	ixgbe_write_mbx(hw, &ping, 1, vf);
+}
+
 void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -1416,6 +1489,28 @@
 	return 0;
 }
 
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+	if (vf >= adapter->num_vfs)
+		return -EINVAL;
+
+	/* nothing to do */
+	if (adapter->vfinfo[vf].trusted == setting)
+		return 0;
+
+	adapter->vfinfo[vf].trusted = setting;
+
+	/* reset VF to reconfigure features */
+	adapter->vfinfo[vf].clear_to_send = false;
+	ixgbe_ping_vf(adapter, vf);
+
+	e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not ");
+
+	return 0;
+}
+
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi)
 {
@@ -1430,5 +1525,6 @@
 	ivi->qos = adapter->vfinfo[vf].pf_qos;
 	ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
 	ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled;
+	ivi->trusted = adapter->vfinfo[vf].trusted;
 	return 0;
 }
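
[Editor's note] ixgbe_update_vf_xcast_mode() above is a read-modify-write over the per-VF VMOLR register: each mode maps to a disable/enable mask pair, and untrusted VFs are silently demoted to the multicast mode. A compact model of that logic (the VMOLR bit positions below are assumptions for illustration, not verified hardware layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI };

/* assumed bit positions for the VMOLR receive-filter fields */
#define VMOLR_ROMPE (1u << 25)   /* registered multicast promiscuous */
#define VMOLR_BAM   (1u << 27)   /* broadcast accept */
#define VMOLR_MPE   (1u << 28)   /* multicast promiscuous (all) */

static uint32_t apply_xcast(uint32_t vmolr, int mode, bool trusted)
{
	uint32_t disable, enable;

	/* untrusted VFs are capped at registered multicast */
	if (mode > XCAST_MULTI && !trusted)
		mode = XCAST_MULTI;

	switch (mode) {
	case XCAST_NONE:
		disable = VMOLR_BAM | VMOLR_ROMPE | VMOLR_MPE;
		enable = 0;
		break;
	case XCAST_MULTI:
		disable = VMOLR_MPE;
		enable = VMOLR_BAM | VMOLR_ROMPE;
		break;
	default: /* XCAST_ALLMULTI */
		disable = 0;
		enable = VMOLR_BAM | VMOLR_ROMPE | VMOLR_MPE;
		break;
	}
	return (vmolr & ~disable) | enable;
}

int main(void)
{
	uint32_t vmolr = 0;

	vmolr = apply_xcast(vmolr, XCAST_ALLMULTI, false); /* demoted */
	printf("untrusted allmulti -> 0x%08x\n", (unsigned)vmolr);
	vmolr = apply_xcast(vmolr, XCAST_ALLMULTI, true);
	printf("trusted allmulti   -> 0x%08x\n", (unsigned)vmolr);
	return 0;
}
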
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2c197e6..dad9257 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -49,6 +49,7 @@
 int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
 				  bool setting);
+int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 939c90c..995f031 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1752,6 +1752,9 @@
  *    FCoE (0x8906):         Filter 2
  *    1588 (0x88f7):         Filter 3
  *    FIP  (0x8914):         Filter 4
+ *    LLDP (0x88CC):         Filter 5
+ *    LACP (0x8809):         Filter 6
+ *    FC   (0x8808):         Filter 7
  */
 #define IXGBE_ETQF_FILTER_EAPOL          0
 #define IXGBE_ETQF_FILTER_FCOE           2
@@ -1759,6 +1762,7 @@
 #define IXGBE_ETQF_FILTER_FIP            4
 #define IXGBE_ETQF_FILTER_LLDP		 5
 #define IXGBE_ETQF_FILTER_LACP		 6
+#define IXGBE_ETQF_FILTER_FC		 7
 
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index ed7b289..ebe0ac9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -198,6 +198,7 @@
  * ixgbe_reset_cs4227 - Reset CS4227 using port expander
  * @hw: pointer to hardware structure
  *
+ * This function assumes that the caller has acquired the proper semaphore.
  * Returns error code
  */
 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
@@ -296,6 +297,14 @@
 		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		msleep(IXGBE_CS4227_CHECK_DELAY);
 	}
+	/* If still pending, assume other instance failed. */
+	if (retry == IXGBE_CS4227_RETRIES) {
+		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+		if (status) {
+			hw_err(hw, "semaphore failed with %d\n", status);
+			return;
+		}
+	}
 
 	/* Reset the CS4227. */
 	status = ixgbe_reset_cs4227(hw);
@@ -1608,7 +1617,7 @@
 	if (status)
 		return status;
 
-	if (lsc)
+	if (lsc && phy->ops.setup_internal_link)
 		return phy->ops.setup_internal_link(hw);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 04c7ec84..ec31472 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -471,6 +471,12 @@
 	board_X550EM_x_vf,
 };
 
+enum ixgbevf_xcast_modes {
+	IXGBEVF_XCAST_MODE_NONE = 0,
+	IXGBEVF_XCAST_MODE_MULTI,
+	IXGBEVF_XCAST_MODE_ALLMULTI,
+};
+
 extern const struct ixgbevf_info ixgbevf_82599_vf_info;
 extern const struct ixgbevf_info ixgbevf_X540_vf_info;
 extern const struct ixgbevf_info ixgbevf_X550_vf_info;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 35da2d7..592ff23 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1008,7 +1008,7 @@
 		container_of(napi, struct ixgbevf_q_vector, napi);
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_ring *ring;
-	int per_ring_budget;
+	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 
 	ixgbevf_for_each_ring(ring, q_vector->tx)
@@ -1027,10 +1027,12 @@
 	else
 		per_ring_budget = budget;
 
-	ixgbevf_for_each_ring(ring, q_vector->rx)
-		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
-							per_ring_budget)
-				   < per_ring_budget);
+	ixgbevf_for_each_ring(ring, q_vector->rx) {
+		int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
+						   per_ring_budget);
+		work_done += cleaned;
+		clean_complete &= (cleaned < per_ring_budget);
+	}
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	ixgbevf_qv_unlock_napi(q_vector);
@@ -1040,7 +1042,7 @@
 	if (!clean_complete)
 		return budget;
 	/* all work done, exit the polling mode */
-	napi_complete(napi);
+	napi_complete_done(napi, work_done);
 	if (adapter->rx_itr_setting & 1)
 		ixgbevf_set_itr(q_vector);
 	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
@@ -1892,9 +1894,17 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned int flags = netdev->flags;
+	int xcast_mode;
+
+	xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
+		     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
+		     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
 
 	spin_lock_bh(&adapter->mbx_lock);
 
+	hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+
 	/* reprogram multicast list */
 	hw->mac.ops.update_mc_addr_list(hw, netdev);
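
[Editor's note] The xcast_mode selection just above is a two-level ternary over netdev->flags: IFF_ALLMULTI wins, otherwise broadcast/multicast capability picks the multi mode, otherwise none. As a standalone check (the IFF_* values below mirror the usual <linux/if.h> definitions):

#include <stdio.h>

/* values mirror the usual <linux/if.h> definitions */
#define IFF_BROADCAST 0x2
#define IFF_ALLMULTI  0x200
#define IFF_MULTICAST 0x1000

enum { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI };

static int flags_to_xcast(unsigned int flags)
{
	return (flags & IFF_ALLMULTI) ? XCAST_ALLMULTI :
	       (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? XCAST_MULTI :
	       XCAST_NONE;
}

int main(void)
{
	printf("bcast-only -> %d\n", flags_to_xcast(IFF_BROADCAST));
	printf("allmulti   -> %d\n", flags_to_xcast(IFF_ALLMULTI));
	printf("no flags   -> %d\n", flags_to_xcast(0));
	return 0;
}
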
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 82f44e0..340cdd4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -112,6 +112,8 @@
 #define IXGBE_VF_GET_RETA	0x0a	/* VF request for RETA */
 #define IXGBE_VF_GET_RSS_KEY	0x0b	/* get RSS hash key */
 
+#define IXGBE_VF_UPDATE_XCAST_MODE	0x0c
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d1339b0..427f360 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -469,6 +469,46 @@
 }
 
 /**
+ *  ixgbevf_update_xcast_mode - Update Multicast mode
+ *  @hw: pointer to the HW structure
+ *  @netdev: pointer to net device structure
+ *  @xcast_mode: new multicast mode
+ *
+ *  Updates the multicast mode of the VF.
+ **/
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
+				     struct net_device *netdev, int xcast_mode)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+	s32 err;
+
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_12:
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+	msgbuf[1] = xcast_mode;
+
+	err = mbx->ops.write_posted(hw, msgbuf, 2);
+	if (err)
+		return err;
+
+	err = mbx->ops.read_posted(hw, msgbuf, 2);
+	if (err)
+		return err;
+
+	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+		return -EPERM;
+
+	return 0;
+}
+
+/**
  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
  *  @hw: pointer to the HW structure
  *  @vlan: 12 bit VLAN ID
@@ -727,6 +767,7 @@
 	.check_link		= ixgbevf_check_mac_link_vf,
 	.set_rar		= ixgbevf_set_rar_vf,
 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
+	.update_xcast_mode	= ixgbevf_update_xcast_mode,
 	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
 	.set_vfta		= ixgbevf_set_vfta_vf,
 };
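
The new ixgbevf_update_xcast_mode() above follows the standard VF-to-PF mailbox handshake: post the request, block on the reply, strip the CTS bit, and treat a NACK as refusal. Reduced to its shape — a sketch with an arbitrary command word, not a second implementation of this function:

	static s32 vf_mbx_request(struct ixgbe_hw *hw, u32 cmd, u32 arg)
	{
		u32 msg[2] = { cmd, arg };
		s32 err;

		err = hw->mbx.ops.write_posted(hw, msg, 2);	/* send to PF */
		if (err)
			return err;

		err = hw->mbx.ops.read_posted(hw, msg, 2);	/* await reply */
		if (err)
			return err;

		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;	/* drop clear-to-send */
		if (msg[0] == (cmd | IXGBE_VT_MSGTYPE_NACK))
			return -EPERM;			/* PF refused */

		return 0;
	}
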
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index d40f036..ef9f773 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -63,6 +63,7 @@
 	s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
 	s32 (*init_rx_addrs)(struct ixgbe_hw *);
 	s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
+	s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
 	s32 (*enable_mc)(struct ixgbe_hw *);
 	s32 (*disable_mc)(struct ixgbe_hw *);
 	s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c78ae18..6bf7259 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -759,11 +759,23 @@
 
 	desc->l4i_chk = 0;
 	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(dev->dev.parent, data,
-				       length, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
-		WARN(1, "dma_map_single failed!\n");
-		return -ENOMEM;
+
+	if (length <= 8 && (uintptr_t)data & 0x7) {
+		/* Copy unaligned small data fragment to TSO header data area */
+		memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+		       data, length);
+		desc->buf_ptr = txq->tso_hdrs_dma
+			+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+	} else {
+		/* Alignment is okay, map buffer and hand off to hardware */
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
+		desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+			length, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dev.parent,
+					       desc->buf_ptr))) {
+			WARN(1, "dma_map_single failed!\n");
+			return -ENOMEM;
+		}
 	}
 
 	cmd_sts = BUFFER_OWNED_BY_DMA;
@@ -779,7 +791,8 @@
 }
 
 static inline void
-txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
+		u32 *first_cmd_sts, bool first_desc)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -788,6 +801,7 @@
 	int ret;
 	u32 cmd_csum = 0;
 	u16 l4i_chk = 0;
+	u32 cmd_sts;
 
 	tx_index = txq->tx_curr_desc;
 	desc = &txq->tx_desc_area[tx_index];
@@ -803,9 +817,17 @@
 	desc->byte_cnt = hdr_len;
 	desc->buf_ptr = txq->tso_hdrs_dma +
 			txq->tx_curr_desc * TSO_HEADER_SIZE;
-	desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
+	cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA  | TX_FIRST_DESC |
 				   GEN_CRC;
 
+	/* Defer updating the first command descriptor until all
+	 * following descriptors have been written.
+	 */
+	if (first_desc)
+		*first_cmd_sts = cmd_sts;
+	else
+		desc->cmd_sts = cmd_sts;
+
 	txq->tx_curr_desc++;
 	if (txq->tx_curr_desc == txq->tx_ring_size)
 		txq->tx_curr_desc = 0;
@@ -819,6 +841,8 @@
 	int desc_count = 0;
 	struct tso_t tso;
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	struct tx_desc *first_tx_desc;
+	u32 first_cmd_sts = 0;
 
 	/* Count needed descriptors */
 	if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
@@ -826,11 +850,14 @@
 		return -EBUSY;
 	}
 
+	first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
+
 	/* Initialize the TSO handler, and prepare the first payload */
 	tso_start(skb, &tso);
 
 	total_len = skb->len - hdr_len;
 	while (total_len > 0) {
+		bool first_desc = (desc_count == 0);
 		char *hdr;
 
 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
@@ -840,7 +867,8 @@
 		/* prepare packet headers: MAC + IP + TCP */
 		hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
-		txq_put_hdr_tso(skb, txq, data_left);
+		txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
+				first_desc);
 
 		while (data_left > 0) {
 			int size;
@@ -860,6 +888,10 @@
 	__skb_queue_tail(&txq->tx_skb, skb);
 	skb_tx_timestamp(skb);
 
+	/* ensure all other descriptors are written before first cmd_sts */
+	wmb();
+	first_tx_desc->cmd_sts = first_cmd_sts;
+
 	/* clear TX_END status */
 	mp->work_tx_end &= ~(1 << txq->index);
 
@@ -1586,7 +1618,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
 static int mv643xx_eth_nway_reset(struct net_device *dev)
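
The deferred-ownership change to the TSO path above deserves a note: the first descriptor's cmd_sts carries the bit that hands the whole chain to the controller, so it is written last, behind a wmb(), once every other descriptor is in memory. The idiom in isolation — my_ring, my_desc and OWNED_BY_DMA are hypothetical stand-ins:

	struct my_desc { u32 cmd_sts; /* buffer pointer etc. elided */ };
	struct my_ring { struct my_desc *desc; };
	#define OWNED_BY_DMA	BIT(31)

	static void publish_chain(struct my_ring *ring, int first, int n)
	{
		int i;

		/* hand over every descriptor except the head */
		for (i = first + 1; i < first + n; i++)
			ring->desc[i].cmd_sts |= OWNED_BY_DMA;

		/* order the writes: the tail of the chain must be visible
		 * before the hardware can observe the head's ownership flip
		 */
		wmb();

		ring->desc[first].cmd_sts |= OWNED_BY_DMA;	/* go */
	}
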
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 514df76..a47496a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -32,6 +32,7 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/cpu.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -100,6 +101,8 @@
 #define MVNETA_TXQ_CMD                           0x2448
 #define      MVNETA_TXQ_DISABLE_SHIFT            8
 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT		 0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT		 0x2488
 #define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 #define MVNETA_ACC_MODE                          0x2500
@@ -191,7 +194,7 @@
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE                 0x3080
+#define MVNETA_MIB_COUNTERS_BASE                 0x3000
 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
@@ -277,6 +280,50 @@
 
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
+struct mvneta_statistic {
+	unsigned short offset;
+	unsigned short type;
+	const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32	32
+#define T_REG_64	64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+	{ 0x3000, T_REG_64, "good_octets_received", },
+	{ 0x3010, T_REG_32, "good_frames_received", },
+	{ 0x3008, T_REG_32, "bad_octets_received", },
+	{ 0x3014, T_REG_32, "bad_frames_received", },
+	{ 0x3018, T_REG_32, "broadcast_frames_received", },
+	{ 0x301c, T_REG_32, "multicast_frames_received", },
+	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
+	{ 0x3058, T_REG_32, "good_fc_received", },
+	{ 0x305c, T_REG_32, "bad_fc_received", },
+	{ 0x3060, T_REG_32, "undersize_received", },
+	{ 0x3064, T_REG_32, "fragments_received", },
+	{ 0x3068, T_REG_32, "oversize_received", },
+	{ 0x306c, T_REG_32, "jabber_received", },
+	{ 0x3070, T_REG_32, "mac_receive_error", },
+	{ 0x3074, T_REG_32, "bad_crc_event", },
+	{ 0x3078, T_REG_32, "collision", },
+	{ 0x307c, T_REG_32, "late_collision", },
+	{ 0x2484, T_REG_32, "rx_discard", },
+	{ 0x2488, T_REG_32, "rx_overrun", },
+	{ 0x3020, T_REG_32, "frames_64_octets", },
+	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
+	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
+	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
+	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+	{ 0x3038, T_REG_64, "good_octets_sent", },
+	{ 0x3040, T_REG_32, "good_frames_sent", },
+	{ 0x3044, T_REG_32, "excessive_collision", },
+	{ 0x3048, T_REG_32, "multicast_frames_sent", },
+	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
+	{ 0x3054, T_REG_32, "fc_sent", },
+	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
+};
+
 struct mvneta_pcpu_stats {
 	struct	u64_stats_sync syncp;
 	u64	rx_packets;
@@ -285,23 +332,34 @@
 	u64	tx_bytes;
 };
 
+struct mvneta_pcpu_port {
+	/* Pointer to the shared port */
+	struct mvneta_port	*pp;
+
+	/* Pointer to the CPU-local NAPI struct */
+	struct napi_struct	napi;
+
+	/* Cause of the previous interrupt */
+	u32			cause_rx_tx;
+};
+
 struct mvneta_port {
+	struct mvneta_pcpu_port __percpu	*ports;
+	struct mvneta_pcpu_stats __percpu	*stats;
+
 	int pkt_size;
 	unsigned int frag_size;
 	void __iomem *base;
 	struct mvneta_rx_queue *rxqs;
 	struct mvneta_tx_queue *txqs;
 	struct net_device *dev;
-
-	u32 cause_rx_tx;
-	struct napi_struct napi;
+	struct notifier_block cpu_notifier;
 
 	/* Core clock */
 	struct clk *clk;
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	struct mvneta_pcpu_stats *stats;
 
 	struct mii_bus *mii_bus;
 	struct phy_device *phy_dev;
@@ -312,6 +370,8 @@
 	unsigned int speed;
 	unsigned int tx_csum_limit;
 	int use_inband_status:1;
+
+	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -468,7 +528,7 @@
 /* The hardware supports eight (8) rx queues, but we are only allowing
  * the first one to be used. Therefore, let's just allocate one queue.
  */
-static int rxq_number = 1;
+static int rxq_number = 8;
 static int txq_number = 8;
 
 static int rxq_def;
@@ -518,6 +578,8 @@
 	/* Perform dummy reads from MIB counters */
 	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 }
 
 /* Get System Network Statistics */
@@ -746,7 +808,6 @@
 	u32 q_map;
 
 	/* Enable all initialized TXs. */
-	mvneta_mib_counters_clear(pp);
 	q_map = 0;
 	for (queue = 0; queue < txq_number; queue++) {
 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
@@ -756,14 +817,7 @@
 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
 	/* Enable all initialized RXQs. */
-	q_map = 0;
-	for (queue = 0; queue < rxq_number; queue++) {
-		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
-		if (rxq->descs != NULL)
-			q_map |= (1 << queue);
-	}
-
-	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
+	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
 }
 
 /* Stop the Ethernet port activity */
@@ -949,7 +1003,7 @@
 	/* Set CPU queue access map - all CPUs have access to all RX
 	 * queues and to all TX queues
 	 */
-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
+	for_each_present_cpu(cpu)
 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
@@ -1030,6 +1084,8 @@
 	mvreg_write(pp, MVNETA_INTR_ENABLE,
 		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
 		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
+
+	mvneta_mib_counters_clear(pp);
 }
 
 /* Set max sizes for tx queues */
@@ -1426,17 +1482,6 @@
 	return MVNETA_TX_L4_CSUM_NOT;
 }
 
-/* Returns rx queue pointer (find last set bit) according to causeRxTx
- * value
- */
-static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
-						u32 cause)
-{
-	int queue = fls(cause >> 8) - 1;
-
-	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
-}
-
 /* Drop packets received by the RXQ and free buffers */
 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 				 struct mvneta_rx_queue *rxq)
@@ -1461,6 +1506,7 @@
 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		     struct mvneta_rx_queue *rxq)
 {
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 	struct net_device *dev = pp->dev;
 	int rx_done;
 	u32 rcvd_pkts = 0;
@@ -1515,7 +1561,7 @@
 
 			skb->protocol = eth_type_trans(skb, dev);
 			mvneta_rx_csum(pp, rx_status, skb);
-			napi_gro_receive(&pp->napi, skb);
+			napi_gro_receive(&port->napi, skb);
 
 			rcvd_pkts++;
 			rcvd_bytes += rx_bytes;
@@ -1550,7 +1596,7 @@
 
 		mvneta_rx_csum(pp, rx_status, skb);
 
-		napi_gro_receive(&pp->napi, skb);
+		napi_gro_receive(&port->napi, skb);
 	}
 
 	if (rcvd_pkts) {
@@ -2061,12 +2107,10 @@
 /* Interrupt handling - the callback for request_irq() */
 static irqreturn_t mvneta_isr(int irq, void *dev_id)
 {
-	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
+	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
 
-	/* Mask all interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
-
-	napi_schedule(&pp->napi);
+	disable_percpu_irq(port->pp->dev->irq);
+	napi_schedule(&port->napi);
 
 	return IRQ_HANDLED;
 }
@@ -2104,11 +2148,11 @@
 {
 	int rx_done = 0;
 	u32 cause_rx_tx;
-	unsigned long flags;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
 	if (!netif_running(pp->dev)) {
-		napi_complete(napi);
+		napi_complete(&port->napi);
 		return rx_done;
 	}
 
@@ -2135,47 +2179,17 @@
 	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
-	cause_rx_tx |= pp->cause_rx_tx;
-	if (rxq_number > 1) {
-		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
-			int count;
-			struct mvneta_rx_queue *rxq;
-			/* get rx queue number from cause_rx_tx */
-			rxq = mvneta_rx_policy(pp, cause_rx_tx);
-			if (!rxq)
-				break;
-
-			/* process the packet in that rx queue */
-			count = mvneta_rx(pp, budget, rxq);
-			rx_done += count;
-			budget -= count;
-			if (budget > 0) {
-				/* set off the rx bit of the
-				 * corresponding bit in the cause rx
-				 * tx register, so that next iteration
-				 * will find the next rx queue where
-				 * packets are received on
-				 */
-				cause_rx_tx &= ~((1 << rxq->id) << 8);
-			}
-		}
-	} else {
-		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
-		budget -= rx_done;
-	}
+	cause_rx_tx |= port->cause_rx_tx;
+	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+	budget -= rx_done;
 
 	if (budget > 0) {
 		cause_rx_tx = 0;
-		napi_complete(napi);
-		local_irq_save(flags);
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number) |
-			    MVNETA_TX_INTR_MASK(txq_number) |
-			    MVNETA_MISCINTR_INTR_MASK);
-		local_irq_restore(flags);
+		napi_complete(&port->napi);
+		enable_percpu_irq(pp->dev->irq, 0);
 	}
 
-	pp->cause_rx_tx = cause_rx_tx;
+	port->cause_rx_tx = cause_rx_tx;
 	return rx_done;
 }
 
@@ -2379,26 +2393,19 @@
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++)
-		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
 }
 
 
 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++) {
-		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
-		if (err) {
-			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-				   __func__, queue);
-			mvneta_cleanup_rxqs(pp);
-			return err;
-		}
+	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+	if (err) {
+		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+			   __func__, rxq_def);
+		mvneta_cleanup_rxqs(pp);
+		return err;
 	}
 
 	return 0;
@@ -2424,6 +2431,8 @@
 
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
 
@@ -2431,7 +2440,11 @@
 	mvneta_port_enable(pp);
 
 	/* Enable polling on the port */
-	napi_enable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&port->napi);
+	}
 
 	/* Unmask interrupts */
 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
@@ -2449,9 +2462,15 @@
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	phy_stop(pp->phy_dev);
 
-	napi_disable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_disable(&port->napi);
+	}
 
 	netif_carrier_off(pp->dev);
 
@@ -2691,6 +2710,125 @@
 	pp->phy_dev = NULL;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+	int online_cpu_idx, cpu, i = 0;
+
+	online_cpu_idx = rxq_def % num_online_cpus();
+
+	for_each_online_cpu(cpu) {
+		if (i == online_cpu_idx)
+			/* Enable per-CPU interrupt on the one CPU we
+			 * just elected
+			 */
+			smp_call_function_single(cpu, mvneta_percpu_enable,
+						pp, true);
+		else
+			/* Disable per-CPU interrupt on all the other CPUs */
+			smp_call_function_single(cpu, mvneta_percpu_disable,
+						pp, true);
+		i++;
+	}
+}
+
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
+{
+	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+					      cpu_notifier);
+	int cpu = (unsigned long)hcpu, other_cpu;
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+
+		/* We have to synchronize on the napi of each CPU
+		 * except the one just being woken up
+		 */
+		for_each_online_cpu(other_cpu) {
+			if (other_cpu != cpu) {
+				struct mvneta_pcpu_port *other_port =
+					per_cpu_ptr(pp->ports, other_cpu);
+
+				napi_synchronize(&other_port->napi);
+			}
+		}
+
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		napi_enable(&port->napi);
+
+		/* Enable per-CPU interrupt on the one CPU we care
+		 * about.
+		 */
+		mvneta_percpu_elect(pp);
+
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+		napi_synchronize(&port->napi);
+		napi_disable(&port->napi);
+		/* Disable per-CPU interrupts on the CPU that is
+		 * brought down.
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_disable,
+					 pp, true);
+
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		/* Check if a new CPU must be elected now that this one is down */
+		mvneta_percpu_elect(pp);
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			MVNETA_RX_INTR_MASK(rxq_number) |
+			MVNETA_TX_INTR_MASK(txq_number) |
+			MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			MVNETA_CAUSE_LINK_CHANGE |
+			MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
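
mvneta_percpu_elect() above steers the port's per-CPU interrupt to a single elected core. Because enable_percpu_irq()/disable_percpu_irq() act on the calling CPU, each toggle is shipped to its target with smp_call_function_single(). The cross-CPU toggle in isolation, with a hypothetical irq number:

	#include <linux/interrupt.h>
	#include <linux/irq.h>
	#include <linux/smp.h>

	static void irq_on(void *info)
	{
		enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
	}

	static void irq_off(void *info)
	{
		disable_percpu_irq(*(unsigned int *)info);
	}

	static void elect_cpu(unsigned int irq, int elected)
	{
		int cpu;

		for_each_online_cpu(cpu)
			/* wait=true: the toggle is done when this returns */
			smp_call_function_single(cpu,
						 cpu == elected ? irq_on : irq_off,
						 &irq, true);
	}
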
 static int mvneta_open(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
@@ -2709,13 +2847,29 @@
 		goto err_cleanup_rxqs;
 
 	/* Connect to port interrupt line */
-	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
-			  MVNETA_DRIVER_NAME, pp);
+	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+				 MVNETA_DRIVER_NAME, pp->ports);
 	if (ret) {
 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
 		goto err_cleanup_txqs;
 	}
 
+	/* Even though the documentation says that request_percpu_irq
+	 * doesn't enable the interrupts automatically, it actually
+	 * does so on the local CPU.
+	 *
+	 * Make sure it's disabled.
+	 */
+	mvneta_percpu_disable(pp);
+
+	/* Elect a CPU to handle our RX queue interrupt */
+	mvneta_percpu_elect(pp);
+
+	/* Register a CPU notifier to handle the case where our CPU
+	 * might be taken offline.
+	 */
+	register_cpu_notifier(&pp->cpu_notifier);
+
 	/* In default link is down */
 	netif_carrier_off(pp->dev);
 
@@ -2730,7 +2884,7 @@
 	return 0;
 
 err_free_irq:
-	free_irq(pp->dev->irq, pp);
+	free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
 	mvneta_cleanup_txqs(pp);
 err_cleanup_rxqs:
@@ -2742,10 +2896,14 @@
 static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu;
 
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
-	free_irq(dev->irq, pp);
+	unregister_cpu_notifier(&pp->cpu_notifier);
+	for_each_present_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);
 
@@ -2875,6 +3033,65 @@
 	return 0;
 }
 
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+				       u8 *data)
+{
+	if (sset == ETH_SS_STATS) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+	}
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+	const struct mvneta_statistic *s;
+	void __iomem *base = pp->base;
+	u32 high, low, val;
+	int i;
+
+	for (i = 0, s = mvneta_statistics;
+	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+	     s++, i++) {
+		val = 0;
+
+		switch (s->type) {
+		case T_REG_32:
+			val = readl_relaxed(base + s->offset);
+			break;
+		case T_REG_64:
+			/* Docs say to read the low 32 bits, then the high */
+			low = readl_relaxed(base + s->offset);
+			high = readl_relaxed(base + s->offset + 4);
+			val = (u64)high << 32 | low;
+			break;
+		}
+
+		pp->ethtool_stats[i] += val;
+	}
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int i;
+
+	mvneta_ethtool_update_stats(pp);
+
+	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+		*data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(mvneta_statistics);
+	return -EOPNOTSUPP;
+}
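
Together these three callbacks implement the ethtool statistics contract: get_sset_count() sizes the set, get_strings() names each slot, and get_ethtool_stats() fills one u64 per slot in the same order (here after folding each 64-bit MIB counter from its low-then-high register pair). A compact sketch with a hypothetical two-entry table:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const char my_stat_names[][ETH_GSTRING_LEN] = {
		"rx_good", "rx_bad",
	};

	static u64 my_read_counter(struct net_device *dev, int idx);

	static int my_get_sset_count(struct net_device *dev, int sset)
	{
		return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stat_names)
					    : -EOPNOTSUPP;
	}

	static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		if (sset == ETH_SS_STATS)
			memcpy(data, my_stat_names, sizeof(my_stat_names));
	}

	static void my_get_stats(struct net_device *dev,
				 struct ethtool_stats *stats, u64 *data)
	{
		/* must write exactly my_get_sset_count() values, in order */
		data[0] = my_read_counter(dev, 0);
		data[1] = my_read_counter(dev, 1);
	}
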
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -2896,6 +3113,9 @@
 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
 	.get_ringparam  = mvneta_ethtool_get_ringparam,
 	.set_ringparam	= mvneta_ethtool_set_ringparam,
+	.get_strings	= mvneta_ethtool_get_strings,
+	.get_ethtool_stats = mvneta_ethtool_get_stats,
+	.get_sset_count	= mvneta_ethtool_get_sset_count,
 };
 
 /* Initialize hw */
@@ -3032,14 +3252,7 @@
 	const char *managed;
 	int phy_mode;
 	int err;
-
-	/* Our multiqueue support is not complete, so for now, only
-	 * allow the usage of the first RX queue
-	 */
-	if (rxq_def != 0) {
-		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
-		return -EINVAL;
-	}
+	int cpu;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
 	if (!dev)
@@ -3091,6 +3304,7 @@
 	err = of_property_read_string(dn, "managed", &managed);
 	pp->use_inband_status = (err == 0 &&
 				 strcmp(managed, "in-band-status") == 0);
+	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
@@ -3107,11 +3321,18 @@
 		goto err_clk;
 	}
 
+	/* Alloc per-cpu port structure */
+	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+	if (!pp->ports) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
 	/* Alloc per-cpu stats */
 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
 	if (!pp->stats) {
 		err = -ENOMEM;
-		goto err_clk;
+		goto err_free_ports;
 	}
 
 	dt_mac_addr = of_get_mac_address(dn);
@@ -3152,7 +3373,12 @@
 	if (dram_target_info)
 		mvneta_conf_mbus_windows(pp, dram_target_info);
 
-	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+		port->pp = pp;
+	}
 
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 	dev->hw_features |= dev->features;
@@ -3183,6 +3409,8 @@
 
 err_free_stats:
 	free_percpu(pp->stats);
+err_free_ports:
+	free_percpu(pp->ports);
 err_clk:
 	clk_disable_unprepare(pp->clk);
 err_put_phy_node:
@@ -3202,6 +3430,7 @@
 
 	unregister_netdev(dev);
 	clk_disable_unprepare(pp->clk);
+	free_percpu(pp->ports);
 	free_percpu(pp->stats);
 	irq_dispose_mapping(dev->irq);
 	of_node_put(pp->phy_node);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d9f4498..5606a04 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4819,6 +4819,18 @@
 		memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
 			      ETH_ALEN);
 
+	/* if the address is invalid, use a random value */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		struct sockaddr sa = { AF_UNSPEC };
+
+		netdev_warn(dev,
+			    "Invalid MAC address, defaulting to random\n");
+		eth_hw_addr_random(dev);
+		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+		if (sky2_set_mac_address(dev, &sa))
+			netdev_warn(dev, "Failed to set MAC address.\n");
+	}
+
 	return dev;
 }
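
The fallback just added mirrors a common probe-time pattern: accept the NVM-provided station address only if it passes is_valid_ether_addr(), otherwise generate one — eth_hw_addr_random() also marks the device NET_ADDR_RANDOM — and push it back to the hardware. Boiled down, with a hypothetical nvm_base:

	memcpy_fromio(dev->dev_addr, nvm_base, ETH_ALEN);
	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);	/* random MAC + NET_ADDR_RANDOM */
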
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f79d812..ddb5541 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -95,9 +95,6 @@
 		(u16) (mdev->dev->caps.fw_ver & 0xffff));
 	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = 0;
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4726122..886e1bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -573,10 +573,8 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_dev *dev = mdev->dev;
-	struct mlx4_mac_entry *entry;
 	int index = 0;
 	int err = 0;
-	u64 reg_id = 0;
 	int *qpn = &priv->base_qpn;
 	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 
@@ -600,44 +598,11 @@
 	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
 	if (err) {
 		en_err(priv, "Failed to reserve qp for mac registration\n");
-		goto qp_err;
+		mlx4_unregister_mac(dev, priv->port, mac);
+		return err;
 	}
 
-	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
-	if (err)
-		goto steer_err;
-
-	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
-				       &priv->tunnel_reg_id);
-	if (err)
-		goto tunnel_err;
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		err = -ENOMEM;
-		goto alloc_err;
-	}
-	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
-	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
-	entry->reg_id = reg_id;
-
-	hlist_add_head_rcu(&entry->hlist,
-			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
-
 	return 0;
-
-alloc_err:
-	if (priv->tunnel_reg_id)
-		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-tunnel_err:
-	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
-
-steer_err:
-	mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
-	mlx4_unregister_mac(dev, priv->port, mac);
-	return err;
 }
 
 static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
@@ -645,39 +610,13 @@
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_dev *dev = mdev->dev;
 	int qpn = priv->base_qpn;
-	u64 mac;
 
 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
 		       priv->dev->dev_addr);
 		mlx4_unregister_mac(dev, priv->port, mac);
 	} else {
-		struct mlx4_mac_entry *entry;
-		struct hlist_node *tmp;
-		struct hlist_head *bucket;
-		unsigned int i;
-
-		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
-			bucket = &priv->mac_hash[i];
-			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-				mac = mlx4_mac_to_u64(entry->mac);
-				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
-				       entry->mac);
-				mlx4_en_uc_steer_release(priv, entry->mac,
-							 qpn, entry->reg_id);
-
-				mlx4_unregister_mac(dev, priv->port, mac);
-				hlist_del_rcu(&entry->hlist);
-				kfree_rcu(entry, rcu);
-			}
-		}
-
-		if (priv->tunnel_reg_id) {
-			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
-			priv->tunnel_reg_id = 0;
-		}
-
 		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
 		       priv->port, qpn);
 		mlx4_qp_release_range(dev, qpn, 1);
@@ -1283,6 +1222,75 @@
 }
 #endif
 
+static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+	u64 reg_id;
+	int err = 0;
+	int *qpn = &priv->base_qpn;
+	struct mlx4_mac_entry *entry;
+
+	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+	if (err)
+		return err;
+
+	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+				       &priv->tunnel_reg_id);
+	if (err)
+		goto tunnel_err;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		err = -ENOMEM;
+		goto alloc_err;
+	}
+
+	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
+	entry->reg_id = reg_id;
+	hlist_add_head_rcu(&entry->hlist,
+			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+	return 0;
+
+alloc_err:
+	if (priv->tunnel_reg_id)
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+
+tunnel_err:
+	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+	return err;
+}
+
+static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
+{
+	u64 mac;
+	unsigned int i;
+	int qpn = priv->base_qpn;
+	struct hlist_head *bucket;
+	struct hlist_node *tmp;
+	struct mlx4_mac_entry *entry;
+
+	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+		bucket = &priv->mac_hash[i];
+		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+			mac = mlx4_mac_to_u64(entry->mac);
+			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
+			       entry->mac);
+			mlx4_en_uc_steer_release(priv, entry->mac,
+						 qpn, entry->reg_id);
+
+			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
+			hlist_del_rcu(&entry->hlist);
+			kfree_rcu(entry, rcu);
+		}
+	}
+
+	if (priv->tunnel_reg_id) {
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+		priv->tunnel_reg_id = 0;
+	}
+}
+
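
mlx4_en_set_rss_steer_rules() above keeps the usual acquire-in-order, release-in-reverse discipline: each failure label unwinds only the steps that already succeeded. The generic shape, with hypothetical acquire_*/release_* pairs:

	static int setup(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;		/* nothing to undo yet */

		err = acquire_b();
		if (err)
			goto undo_a;

		err = acquire_c();
		if (err)
			goto undo_b;

		return 0;

	undo_b:
		release_b();
	undo_a:
		release_a();
		return err;
	}
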
 static void mlx4_en_tx_timeout(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1684,6 +1692,11 @@
 		goto tx_err;
 	}
 
+	/* Set Unicast and VXLAN steering rules */
+	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
+	    mlx4_en_set_rss_steer_rules(priv))
+		mlx4_warn(mdev, "Failed setting steering rules\n");
+
 	/* Attach rx QP to broadcast address */
 	eth_broadcast_addr(&mc_list[10]);
 	mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1831,6 +1844,9 @@
 	for (i = 0; i < priv->tx_ring_num; i++)
 		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
+	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+		mlx4_en_delete_rss_steer_rules(priv);
+
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
 
@@ -2800,7 +2816,6 @@
 	struct mlx4_en_priv *priv;
 	int i;
 	int err;
-	u64 mac_u64;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
 				 MAX_TX_RINGS, MAX_RX_RINGS);
@@ -2892,17 +2907,17 @@
 	dev->addr_len = ETH_ALEN;
 	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		if (mlx4_is_slave(priv->mdev->dev)) {
-			eth_hw_addr_random(dev);
-			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
-			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
-			mdev->dev->caps.def_mac[priv->port] = mac_u64;
-		} else {
-			en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
-			       priv->port, dev->dev_addr);
-			err = -EINVAL;
-			goto out;
-		}
+		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
+		       priv->port, dev->dev_addr);
+		err = -EINVAL;
+		goto out;
+	} else if (mlx4_is_slave(priv->mdev->dev) &&
+		   (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
+		/* Random MAC was assigned in mlx4_slave_cap
+		 * in mlx4_core module
+		 */
+		dev->addr_assign_type |= NET_ADDR_RANDOM;
+		en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
 	}
 
 	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8e81e53..c344884 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1364,6 +1364,10 @@
 	 * and performing a NOP command
 	 */
 	for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+		/* Make sure request_irq was called */
+		if (!priv->eq_table.eq[i].have_irq)
+			continue;
+
 		/* Temporary use polling for command completions */
 		mlx4_cmd_use_polling(dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e8ec1de..f13a4d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2840,3 +2840,19 @@
 	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(set_phv_bit);
+
+void mlx4_replace_zero_macs(struct mlx4_dev *dev)
+{
+	int i;
+	u8 mac_addr[ETH_ALEN];
+
+	dev->port_random_macs = 0;
+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		if (!dev->caps.def_mac[i] &&
+		    dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+			eth_random_addr(mac_addr);
+			dev->port_random_macs |= 1 << i;
+			dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+		}
+}
+EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
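
mlx4_replace_zero_macs() records, per port, that the MAC it installed was invented by the driver; the en_netdev.c hunk earlier in this series consumes that bit to keep the address but flag it NET_ADDR_RANDOM instead of failing probe. The producer/consumer pair in outline — the parameters are hypothetical, mlx4_mac_to_u64() is the driver's own helper:

	/* producer: core driver, at startup */
	static void fix_port_mac(u64 *def_mac, u32 *random_macs, int port)
	{
		u8 mac[ETH_ALEN];

		if (def_mac[port])
			return;			/* firmware provided one */

		eth_random_addr(mac);
		*random_macs |= 1 << port;	/* remember we invented it */
		def_mac[port] = mlx4_mac_to_u64(mac);
	}

	/* consumer: Ethernet driver, at netdev setup */
	if (random_macs & (1 << port))
		dev->addr_assign_type |= NET_ADDR_RANDOM;
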
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 006757f..85f1b1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -863,6 +863,8 @@
 		return -ENODEV;
 	}
 
+	mlx4_replace_zero_macs(dev);
+
 	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
 	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
 	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
@@ -2669,14 +2671,11 @@
 
 	if (msi_x) {
 		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
-		bool shared_ports = false;
 
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
-		if (nreq > MAX_MSIX) {
+		if (nreq > MAX_MSIX)
 			nreq = MAX_MSIX;
-			shared_ports = true;
-		}
 
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
@@ -2699,9 +2698,6 @@
 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
 			    dev->caps.num_ports);
 
-		if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
-			shared_ports = true;
-
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
 			if (i == MLX4_EQ_ASYNC)
 				continue;
@@ -2709,7 +2705,7 @@
 			priv->eq_table.eq[i].irq =
 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
 
-			if (shared_ports) {
+			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
 					    dev->caps.num_ports);
 				/* We don't set affinity hint when there
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d..1d4e2e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@
 	if (prot == MLX4_PROT_ETH) {
 		/* manage the steering entry for promisc mode */
 		if (new_entry)
-			new_steering_entry(dev, port, steer, index, qp->qpn);
+			err = new_steering_entry(dev, port, steer,
+						 index, qp->qpn);
 		else
-			existing_steering_entry(dev, port, steer,
-						index, qp->qpn);
+			err = existing_steering_entry(dev, port, steer,
+						      index, qp->qpn);
 	}
 	if (err && link && index != -1) {
 		if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 232b2b5..e1cf903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1378,6 +1378,8 @@
 
 void mlx4_init_quotas(struct mlx4_dev *dev);
 
+/* for VFs, replace zero MACs with randomly-generated MACs at driver start */
+void mlx4_replace_zero_macs(struct mlx4_dev *dev);
 int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 /* Returns the VF index of slave */
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 78f51e1..9319519 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -318,7 +318,7 @@
 				key, NULL);
 	} else {
 		mailbox = mlx4_alloc_cmd_mailbox(dev);
-		if (IS_ERR_OR_NULL(mailbox))
+		if (IS_ERR(mailbox))
 			return PTR_ERR(mailbox);
 
 		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2026863..3311f35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -422,15 +422,15 @@
 	u64 qp_mask = 0;
 	int err = 0;
 
+	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+		return -EINVAL;
+
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
 
 	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
 
-	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
-		return -EINVAL;
-
 	if (attr & MLX4_UPDATE_QP_SMAC) {
 		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 731423c..ac4b99a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1238,8 +1238,10 @@
 	return 0;
 
 undo:
-	for (--i; i >= base; --i)
+	for (--i; i >= 0; --i) {
 		rb_erase(&res_arr[i]->node, root);
+		list_del_init(&res_arr[i]->list);
+	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 84838c2..fabfc9e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -254,6 +254,156 @@
 		pr_debug("\n");
 }
 
+enum {
+	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+	MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+				       u32 *synd, u8 *status)
+{
+	*synd = 0;
+	*status = 0;
+
+	switch (op) {
+	case MLX5_CMD_OP_TEARDOWN_HCA:
+	case MLX5_CMD_OP_DISABLE_HCA:
+	case MLX5_CMD_OP_MANAGE_PAGES:
+	case MLX5_CMD_OP_DESTROY_MKEY:
+	case MLX5_CMD_OP_DESTROY_EQ:
+	case MLX5_CMD_OP_DESTROY_CQ:
+	case MLX5_CMD_OP_DESTROY_QP:
+	case MLX5_CMD_OP_DESTROY_PSV:
+	case MLX5_CMD_OP_DESTROY_SRQ:
+	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+	case MLX5_CMD_OP_DESTROY_DCT:
+	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+	case MLX5_CMD_OP_DEALLOC_PD:
+	case MLX5_CMD_OP_DEALLOC_UAR:
+	case MLX5_CMD_OP_DETTACH_FROM_MCG:
+	case MLX5_CMD_OP_DEALLOC_XRCD:
+	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_DESTROY_TIR:
+	case MLX5_CMD_OP_DESTROY_SQ:
+	case MLX5_CMD_OP_DESTROY_RQ:
+	case MLX5_CMD_OP_DESTROY_RMP:
+	case MLX5_CMD_OP_DESTROY_TIS:
+	case MLX5_CMD_OP_DESTROY_RQT:
+	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+		return MLX5_CMD_STAT_OK;
+
+	case MLX5_CMD_OP_QUERY_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_ADAPTER:
+	case MLX5_CMD_OP_INIT_HCA:
+	case MLX5_CMD_OP_ENABLE_HCA:
+	case MLX5_CMD_OP_QUERY_PAGES:
+	case MLX5_CMD_OP_SET_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_ISSI:
+	case MLX5_CMD_OP_SET_ISSI:
+	case MLX5_CMD_OP_CREATE_MKEY:
+	case MLX5_CMD_OP_QUERY_MKEY:
+	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+	case MLX5_CMD_OP_CREATE_EQ:
+	case MLX5_CMD_OP_QUERY_EQ:
+	case MLX5_CMD_OP_GEN_EQE:
+	case MLX5_CMD_OP_CREATE_CQ:
+	case MLX5_CMD_OP_QUERY_CQ:
+	case MLX5_CMD_OP_MODIFY_CQ:
+	case MLX5_CMD_OP_CREATE_QP:
+	case MLX5_CMD_OP_RST2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
+	case MLX5_CMD_OP_RTR2RTS_QP:
+	case MLX5_CMD_OP_RTS2RTS_QP:
+	case MLX5_CMD_OP_SQERR2RTS_QP:
+	case MLX5_CMD_OP_2ERR_QP:
+	case MLX5_CMD_OP_2RST_QP:
+	case MLX5_CMD_OP_QUERY_QP:
+	case MLX5_CMD_OP_SQD_RTS_QP:
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_CREATE_PSV:
+	case MLX5_CMD_OP_CREATE_SRQ:
+	case MLX5_CMD_OP_QUERY_SRQ:
+	case MLX5_CMD_OP_ARM_RQ:
+	case MLX5_CMD_OP_CREATE_XRC_SRQ:
+	case MLX5_CMD_OP_QUERY_XRC_SRQ:
+	case MLX5_CMD_OP_ARM_XRC_SRQ:
+	case MLX5_CMD_OP_CREATE_DCT:
+	case MLX5_CMD_OP_DRAIN_DCT:
+	case MLX5_CMD_OP_QUERY_DCT:
+	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+	case MLX5_CMD_OP_QUERY_VPORT_STATE:
+	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_ALLOC_PD:
+	case MLX5_CMD_OP_ALLOC_UAR:
+	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+	case MLX5_CMD_OP_ACCESS_REG:
+	case MLX5_CMD_OP_ATTACH_TO_MCG:
+	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+	case MLX5_CMD_OP_MAD_IFC:
+	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+	case MLX5_CMD_OP_SET_MAD_DEMUX:
+	case MLX5_CMD_OP_NOP:
+	case MLX5_CMD_OP_ALLOC_XRCD:
+	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+	case MLX5_CMD_OP_QUERY_CONG_STATUS:
+	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_CREATE_TIR:
+	case MLX5_CMD_OP_MODIFY_TIR:
+	case MLX5_CMD_OP_QUERY_TIR:
+	case MLX5_CMD_OP_CREATE_SQ:
+	case MLX5_CMD_OP_MODIFY_SQ:
+	case MLX5_CMD_OP_QUERY_SQ:
+	case MLX5_CMD_OP_CREATE_RQ:
+	case MLX5_CMD_OP_MODIFY_RQ:
+	case MLX5_CMD_OP_QUERY_RQ:
+	case MLX5_CMD_OP_CREATE_RMP:
+	case MLX5_CMD_OP_MODIFY_RMP:
+	case MLX5_CMD_OP_QUERY_RMP:
+	case MLX5_CMD_OP_CREATE_TIS:
+	case MLX5_CMD_OP_MODIFY_TIS:
+	case MLX5_CMD_OP_QUERY_TIS:
+	case MLX5_CMD_OP_CREATE_RQT:
+	case MLX5_CMD_OP_MODIFY_RQT:
+	case MLX5_CMD_OP_QUERY_RQT:
+	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+		*status = MLX5_DRIVER_STATUS_ABORTED;
+		*synd = MLX5_DRIVER_SYND;
+		return -EIO;
+	default:
+		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+		return -EINVAL;
+	}
+}
+
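
The long switch above encodes a simple policy for commands issued against a dead device: destroy/teardown opcodes are reported as successful so shutdown can keep making progress, while everything else fails fast with a driver-generated status and syndrome. Stripped of the opcode lists — the names here are hypothetical:

	#define DRIVER_SYND		0xbadd00de	/* as defined above */
	#define DRIVER_STATUS_ABORTED	0xfe
	#define OP_DESTROY_SOMETHING	0x0c		/* hypothetical opcode */

	static int dead_device_status(u16 op, u32 *synd, u8 *status)
	{
		switch (op) {
		case OP_DESTROY_SOMETHING:	/* teardown: pretend success */
			*synd = 0;
			*status = 0;
			return 0;
		default:			/* everything else: fail */
			*synd = DRIVER_SYND;
			*status = DRIVER_STATUS_ABORTED;
			return -EIO;
		}
	}
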
 const char *mlx5_command_str(int command)
 {
 	switch (command) {
@@ -473,6 +623,7 @@
 	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
 	struct mlx5_cmd_layout *lay;
 	struct semaphore *sem;
+	unsigned long flags;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -485,6 +636,9 @@
 		}
 	} else {
 		ent->idx = cmd->max_reg_cmds;
+		spin_lock_irqsave(&cmd->alloc_lock, flags);
+		clear_bit(ent->idx, &cmd->bitmask);
+		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 	}
 
 	ent->token = alloc_token(cmd);
@@ -584,6 +738,16 @@
 	return err;
 }
 
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+	return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+	return &out->status;
+}
+
 /*  Notes:
  *    1. Callback functions may not sleep
  *    2. page queue commands do not support asynchronous completion
@@ -1081,7 +1245,7 @@
 	}
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -1092,7 +1256,10 @@
 	s64 ds;
 	struct mlx5_cmd_stats *stats;
 	unsigned long flags;
+	unsigned long vector;
 
+	/* there can be at most 32 command queues */
+	vector = vec & 0xffffffff;
 	for (i = 0; i < (1 << cmd->log_sz); i++) {
 		if (test_bit(i, &vector)) {
 			struct semaphore *sem;
@@ -1110,11 +1277,16 @@
 					ent->ret = verify_signature(ent);
 				else
 					ent->ret = 0;
-				ent->status = ent->lay->status_own >> 1;
+				if (vec & MLX5_TRIGGERED_CMD_COMP)
+					ent->status = MLX5_DRIVER_STATUS_ABORTED;
+				else
+					ent->status = ent->lay->status_own >> 1;
+
 				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
 					      ent->ret, deliv_status_to_str(ent->status), ent->status);
 			}
 			free_ent(cmd, ent->idx);
+
 			if (ent->callback) {
 				ds = ent->ts2 - ent->ts1;
 				if (ent->op < ARRAY_SIZE(cmd->stats)) {
@@ -1184,6 +1356,11 @@
 	return msg;
 }
 
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+	return be16_to_cpu(in->opcode);
+}
+
 static int is_manage_pages(struct mlx5_inbox_hdr *in)
 {
 	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1198,6 +1375,15 @@
 	gfp_t gfp;
 	int err;
 	u8 status = 0;
+	u32 drv_synd;
+
+	if (pci_channel_offline(dev->pdev) ||
+	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
+		*get_status_ptr(out) = status;
+		return err;
+	}
 
 	pages_queue = is_manage_pages(in);
 	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index e71563c..22d603f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -598,6 +598,8 @@
 		return;
 
 	priv->vlan.filter_disabled = false;
+	if (priv->netdev->flags & IFF_PROMISC)
+		return;
 	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -607,6 +609,8 @@
 		return;
 
 	priv->vlan.filter_disabled = true;
+	if (priv->netdev->flags & IFF_PROMISC)
+		return;
 	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
@@ -717,8 +721,12 @@
 	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
 	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
 
-	if (enable_promisc)
+	if (enable_promisc) {
 		mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+		if (!priv->vlan.filter_disabled)
+			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
+	}
 	if (enable_allmulti)
 		mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
 	if (enable_broadcast)
@@ -730,8 +738,12 @@
 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
 	if (disable_allmulti)
 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
-	if (disable_promisc)
+	if (disable_promisc) {
+		if (!priv->vlan.filter_disabled)
+			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
 		mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+	}
 
 	ea->promisc_enabled   = promisc_enabled;
 	ea->allmulti_enabled  = allmulti_enabled;
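
The four hunks above close a rule leak: promiscuous mode and "VLAN filter disabled" both want the same any-VID steering rule, so each path now checks the other's state before adding or removing it. The invariant, reduced to a guard — struct ctx and the rule helpers are hypothetical:

	struct ctx { bool promisc; bool vlan_filter_on; };
	static void add_any_vid_rule(struct ctx *c);
	static void del_any_vid_rule(struct ctx *c);

	/* invariant: the rule exists iff (promisc || !vlan_filter_on) */
	static void set_promisc(struct ctx *c, bool on)
	{
		if (on == c->promisc)
			return;
		c->promisc = on;

		/* touch the rule only if the other owner isn't holding it */
		if (c->vlan_filter_on) {
			if (on)
				add_any_vid_rule(c);
			else
				del_any_vid_rule(c);
		}
	}
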
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ff..9335e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@
 
 	return err;
 }
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
-	struct mlx5_cmd_query_special_contexts_mbox_in in;
-	struct mlx5_cmd_query_special_contexts_mbox_out out;
-	int err;
-
-	memset(&in, 0, sizeof(in));
-	memset(&out, 0, sizeof(out));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-	if (err)
-		return err;
-
-	if (out.hdr.status)
-		err = mlx5_cmd_status_to_err(&out.hdr);
-
-	*rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1277c42..f5deb64 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
+#include <linux/hardirq.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -57,31 +58,102 @@
 	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
 };
 
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
+enum {
+	MLX5_NIC_IFC_FULL		= 0,
+	MLX5_NIC_IFC_DISABLED		= 1,
+	MLX5_NIC_IFC_NO_DRAM_NIC	= 2
+};
+
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
+{
+	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+	unsigned long flags;
+	u64 vector;
+
+	/* wait for pending handlers to complete */
+	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+	if (!vector)
+		goto no_trig;
+
+	vector |= MLX5_TRIGGERED_CMD_COMP;
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+	mlx5_cmd_comp_handler(dev, vector);
+	return;
+
+no_trig:
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
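
trigger_cmd_completions() recovers the set of in-flight command slots straight from the allocator state: cmd->bitmask has a bit set for every free slot, so its complement under the mask of all (1 << log_sz) slots is exactly the busy set, which is then completed by hand with MLX5_TRIGGERED_CMD_COMP or'ed in. The bit manipulation in isolation — my_cmd, TRIGGERED_FLAG and complete_commands() are hypothetical:

	struct my_cmd { u64 bitmask; int log_sz; };
	#define TRIGGERED_FLAG	(1ULL << 32)	/* out-of-band marker bit */
	static void complete_commands(struct my_cmd *cmd, u64 vec);

	static void complete_inflight(struct my_cmd *cmd)
	{
		/* bitmask: bit i set iff slot i is free; 1 << log_sz slots */
		u64 all_slots = (1ULL << (1 << cmd->log_sz)) - 1;
		u64 busy = ~cmd->bitmask & all_slots;

		if (busy)
			complete_commands(cmd, busy | TRIGGERED_FLAG);
	}
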
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	struct health_buffer __iomem *h = health->health;
+
+	if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+		return 1;
+
+	if (ioread32be(&h->fw_ver) == 0xffffffff)
+		return 1;
+
+	return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		return;
+
+	mlx5_core_err(dev, "start\n");
+	if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+	u8 nic_interface = get_nic_interface(dev);
+
+	switch (nic_interface) {
+	case MLX5_NIC_IFC_FULL:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it is in full driver mode\n");
+		break;
+
+	case MLX5_NIC_IFC_DISABLED:
+		mlx5_core_warn(dev, "starting teardown\n");
+		break;
+
+	case MLX5_NIC_IFC_NO_DRAM_NIC:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it is in no-DRAM NIC mode\n");
+		break;
+	default:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
+			       nic_interface);
+	}
+
+	mlx5_disable_device(dev);
+}
 
 static void health_care(struct work_struct *work)
 {
-	struct mlx5_core_health *health, *n;
+	struct mlx5_core_health *health;
 	struct mlx5_core_dev *dev;
 	struct mlx5_priv *priv;
-	LIST_HEAD(tlist);
 
-	spin_lock_irq(&health_lock);
-	list_splice_init(&health_list, &tlist);
-
-	spin_unlock_irq(&health_lock);
-
-	list_for_each_entry_safe(health, n, &tlist, list) {
-		priv = container_of(health, struct mlx5_priv, health);
-		dev = container_of(priv, struct mlx5_core_dev, priv);
-		mlx5_core_warn(dev, "handling bad device here\n");
-		/* nothing yet */
-		spin_lock_irq(&health_lock);
-		list_del_init(&health->list);
-		spin_unlock_irq(&health_lock);
-	}
+	health = container_of(work, struct mlx5_core_health, work);
+	priv = container_of(health, struct mlx5_priv, health);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	mlx5_core_warn(dev, "handling bad device here\n");
+	mlx5_handle_bad_state(dev);
 }
 
 static const char *hsynd_str(u8 synd)
@@ -114,41 +186,70 @@
 	}
 }
 
-static u16 read_be16(__be16 __iomem *p)
+static u16 get_maj(u32 fw)
 {
-	return swab16(readl((__force u16 __iomem *) p));
+	return fw >> 28;
 }
 
-static u32 read_be32(__be32 __iomem *p)
+static u16 get_min(u32 fw)
 {
-	return swab32(readl((__force u32 __iomem *) p));
+	return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+	return fw & 0xffff;
 }
 
 static void print_health_info(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
 	struct health_buffer __iomem *h = health->health;
+	char fw_str[18];
+	u32 fw;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-		pr_info("assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+	/* If the syndrome is 0, the device is OK and there is no need to print the buffer */
+	if (!ioread8(&h->synd))
+		return;
 
-	pr_info("assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
-	pr_info("assert_callra 0x%08x\n", read_be32(&h->assert_callra));
-	pr_info("fw_ver 0x%08x\n", read_be32(&h->fw_ver));
-	pr_info("hw_id 0x%08x\n", read_be32(&h->hw_id));
-	pr_info("irisc_index %d\n", readb(&h->irisc_index));
-	pr_info("synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
-	pr_info("ext_sync 0x%04x\n", read_be16(&h->ext_synd));
+	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
+		dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+
+	dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+	dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+	fw = ioread32be(&h->fw_ver);
+	sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+	dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+	dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+	dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+	dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+	dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+}
+
+static unsigned long get_next_poll_jiffies(void)
+{
+	unsigned long next;
+
+	get_random_bytes(&next, sizeof(next));
+	next %= HZ;
+	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+	return next;
 }
 
 static void poll_health(unsigned long data)
 {
 	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
 	struct mlx5_core_health *health = &dev->priv.health;
-	unsigned long next;
 	u32 count;
 
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		trigger_cmd_completions(dev);
+		mod_timer(&health->timer, get_next_poll_jiffies());
+		return;
+	}
+
 	count = ioread32be(health->health_counter);
 	if (count == health->prev)
 		++health->miss_counter;
@@ -157,18 +258,16 @@
 
 	health->prev = count;
 	if (health->miss_counter == MAX_MISSES) {
-		mlx5_core_err(dev, "device's health compromised\n");
+		dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
 		print_health_info(dev);
-		spin_lock_irq(&health_lock);
-		list_add_tail(&health->list, &health_list);
-		spin_unlock_irq(&health_lock);
-
-		queue_work(mlx5_core_wq, &health_work);
 	} else {
-		get_random_bytes(&next, sizeof(next));
-		next %= HZ;
-		next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
-		mod_timer(&health->timer, next);
+		mod_timer(&health->timer, get_next_poll_jiffies());
+	}
+
+	if (in_fatal(dev) && !health->sick) {
+		health->sick = true;
+		print_health_info(dev);
+		queue_work(health->wq, &health->work);
 	}
 }
 
@@ -176,7 +275,6 @@
 {
 	struct mlx5_core_health *health = &dev->priv.health;
 
-	INIT_LIST_HEAD(&health->list);
 	init_timer(&health->timer);
 	health->health = &dev->iseg->health;
 	health->health_counter = &dev->iseg->health_counter;
@@ -192,18 +290,33 @@
 	struct mlx5_core_health *health = &dev->priv.health;
 
 	del_timer_sync(&health->timer);
-
-	spin_lock_irq(&health_lock);
-	if (!list_empty(&health->list))
-		list_del_init(&health->list);
-	spin_unlock_irq(&health_lock);
 }
 
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	destroy_workqueue(health->wq);
 }
 
-void  __init mlx5_health_init(void)
+int mlx5_health_init(struct mlx5_core_dev *dev)
 {
-	INIT_WORK(&health_work, health_care);
+	struct mlx5_core_health *health;
+	char *name;
+
+	health = &dev->priv.health;
+	name = kmalloc(64, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	snprintf(name, 64, "mlx5_health%s", dev_name(&dev->pdev->dev));
+	health->wq = create_singlethread_workqueue(name);
+	kfree(name);
+	if (!health->wq)
+		return -ENOMEM;
+
+	INIT_WORK(&health->work, health_care);
+
+	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7718f6a..2388aec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -39,12 +39,14 @@
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/srq.h>
 #include <linux/debugfs.h>
 #include <linux/kmod.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
@@ -62,7 +64,6 @@
 module_param_named(prof_sel, prof_sel, int, 0444);
 MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
 
-struct workqueue_struct *mlx5_core_wq;
 static LIST_HEAD(intf_list);
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(intf_mutex);
@@ -152,6 +153,25 @@
 	},
 };
 
+#define FW_INIT_TIMEOUT_MILI	2000
+#define FW_INIT_WAIT_MS		2
+
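+/* Poll the initializing bit in the init segment until the firmware
+ * clears it or the timeout expires; FW_INIT_WAIT_MS keeps the polling
+ * cheap while FW_INIT_TIMEOUT_MILI bounds probe time.
+ */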
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+	int err = 0;
+
+	while (fw_initializing(dev)) {
+		if (time_after(jiffies, end)) {
+			err = -EBUSY;
+			break;
+		}
+		msleep(FW_INIT_WAIT_MS);
+	}
+
+	return err;
+}
+
 static int set_dma_caps(struct pci_dev *pdev)
 {
 	int err;
@@ -182,6 +202,34 @@
 	return err;
 }
 
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+}
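+
+/* The two wrappers above serialize PCI enable/disable behind
+ * pci_status_mutex, so the normal teardown path and the AER error flow
+ * cannot enable or disable the device twice.
+ */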
+
 static int request_bar(struct pci_dev *pdev)
 {
 	int err = 0;
@@ -808,7 +856,7 @@
 	if (!priv->dbg_root)
 		return -ENOMEM;
 
-	err = pci_enable_device(pdev);
+	err = mlx5_pci_enable_device(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		goto err_dbg;
@@ -842,7 +890,7 @@
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
 err_disable:
-	pci_disable_device(dev->pdev);
+	mlx5_pci_disable_device(dev);
 
 err_dbg:
 	debugfs_remove(priv->dbg_root);
@@ -854,7 +902,7 @@
 	iounmap(dev->iseg);
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
-	pci_disable_device(dev->pdev);
+	mlx5_pci_disable_device(dev);
 	debugfs_remove(priv->dbg_root);
 }
 
@@ -864,13 +912,32 @@
 	struct pci_dev *pdev = dev->pdev;
 	int err;
 
+	mutex_lock(&dev->intf_state_mutex);
+	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+			 __func__);
+		goto out;
+	}
+
 	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
 		 fw_rev_min(dev), fw_rev_sub(dev));
 
+	/* On load, clear any previous indication of internal error;
+	 * the device is up.
+	 */
+	dev->state = MLX5_DEVICE_STATE_UP;
+
 	err = mlx5_cmd_init(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-		return err;
+		goto out_err;
+	}
+
+	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+	if (err) {
+		dev_err(&dev->pdev->dev, "Firmware over %d ms in initializing state, aborting\n",
+			FW_INIT_TIMEOUT_MILI);
+		goto out_err;
 	}
 
 	mlx5_pagealloc_init(dev);
@@ -995,6 +1062,10 @@
 	if (err)
 		pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
+	dev->interface_state = MLX5_INTERFACE_STATE_UP;
+out:
+	mutex_unlock(&dev->intf_state_mutex);
+
 	return 0;
 
 err_reg_dev:
@@ -1025,7 +1096,7 @@
 	mlx5_stop_health_poll(dev);
 	if (mlx5_cmd_teardown_hca(dev)) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		return err;
+		goto out_err;
 	}
 
 err_pagealloc_stop:
@@ -1041,12 +1112,23 @@
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 
+out_err:
+	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+	mutex_unlock(&dev->intf_state_mutex);
+
 	return err;
 }
 
 static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {
+	int err = 0;
 
+	mutex_lock(&dev->intf_state_mutex);
+	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+			 __func__);
+		goto out;
+	}
 	mlx5_unregister_device(dev);
 	mlx5_cleanup_mr_table(dev);
 	mlx5_cleanup_srq_table(dev);
@@ -1060,9 +1142,10 @@
 	mlx5_eq_cleanup(dev);
 	mlx5_disable_msix(dev);
 	mlx5_stop_health_poll(dev);
-	if (mlx5_cmd_teardown_hca(dev)) {
+	err = mlx5_cmd_teardown_hca(dev);
+	if (err) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		return 1;
+		goto out;
 	}
 	mlx5_pagealloc_stop(dev);
 	mlx5_reclaim_startup_pages(dev);
@@ -1070,11 +1153,14 @@
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_cmd_cleanup(dev);
 
-	return 0;
+out:
+	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+	mutex_unlock(&dev->intf_state_mutex);
+	return err;
 }
 
-static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-			    unsigned long param)
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+		     unsigned long param)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_device_context *dev_ctx;
@@ -1123,20 +1209,30 @@
 
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
+	mutex_init(&dev->pci_status_mutex);
+	mutex_init(&dev->intf_state_mutex);
 	err = mlx5_pci_init(dev, priv);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
 		goto clean_dev;
 	}
 
+	err = mlx5_health_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+		goto close_pci;
+	}
+
 	err = mlx5_load_one(dev, priv);
 	if (err) {
 		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
-		goto close_pci;
+		goto clean_health;
 	}
 
 	return 0;
 
+clean_health:
+	mlx5_health_cleanup(dev);
 close_pci:
 	mlx5_pci_close(dev, priv);
 clean_dev:
@@ -1153,13 +1249,121 @@
 
 	if (mlx5_unload_one(dev, priv)) {
 		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+		mlx5_health_cleanup(dev);
 		return;
 	}
+	mlx5_health_cleanup(dev);
 	mlx5_pci_close(dev, priv);
 	pci_set_drvdata(pdev, NULL);
 	kfree(dev);
 }
 
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+	mlx5_enter_error_state(dev);
+	mlx5_unload_one(dev, priv);
+	mlx5_pci_disable_device(dev);
+	return state == pci_channel_io_perm_failure ?
+		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	int err = 0;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
+	err = mlx5_pci_enable_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+			__func__, err);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+	mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+/* Wait for the device to show vital signs: for now we check that the
+ * device ID can be read and that the health counter shows a non-zero
+ * value different from 0xffffffff.
+ */
+static void wait_vital(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_core_health *health = &dev->priv.health;
+	const int niter = 100;
+	u32 count;
+	u16 did;
+	int i;
+
+	/* Wait for firmware to be ready after reset */
+	msleep(1000);
+	for (i = 0; i < niter; i++) {
+		if (pci_read_config_word(pdev, 2, &did)) {
+			dev_warn(&pdev->dev, "failed reading config word\n");
+			break;
+		}
+		if (did == pdev->device) {
+			dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
+			break;
+		}
+		msleep(50);
+	}
+	if (i == niter)
+		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+
+	for (i = 0; i < niter; i++) {
+		count = ioread32be(health->health_counter);
+		if (count && count != 0xffffffff) {
+			dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+			break;
+		}
+		msleep(50);
+	}
+
+	if (i == niter)
+		dev_warn(&pdev->dev, "%s-%d: health counter did not become valid\n", __func__, __LINE__);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+	int err;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
+	pci_save_state(pdev);
+	wait_vital(pdev);
+
+	err = mlx5_load_one(dev, priv);
+	if (err)
+		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
+			__func__, err);
+	else
+		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
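+/* AER recovery flow: the PCI core invokes .error_detected first, then
+ * .slot_reset once the link has been reset, and finally .resume when
+ * traffic may restart; the table below wires the handlers into that
+ * flow.
+ */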
+static const struct pci_error_handlers mlx5_err_handler = {
+	.error_detected = mlx5_pci_err_detected,
+	.slot_reset	= mlx5_pci_slot_reset,
+	.resume		= mlx5_pci_resume
+};
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
 	{ PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
@@ -1176,7 +1380,8 @@
 	.name           = DRIVER_NAME,
 	.id_table       = mlx5_core_pci_table,
 	.probe          = init_one,
-	.remove         = remove_one
+	.remove         = remove_one,
+	.err_handler	= &mlx5_err_handler
 };
 
 static int __init init(void)
@@ -1184,16 +1389,10 @@
 	int err;
 
 	mlx5_register_debugfs();
-	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
-	if (!mlx5_core_wq) {
-		err = -ENOMEM;
-		goto err_debug;
-	}
-	mlx5_health_init();
 
 	err = pci_register_driver(&mlx5_core_driver);
 	if (err)
-		goto err_health;
+		goto err_debug;
 
 #ifdef CONFIG_MLX5_CORE_EN
 	mlx5e_init();
@@ -1201,9 +1400,6 @@
 
 	return 0;
 
-err_health:
-	mlx5_health_cleanup();
-	destroy_workqueue(mlx5_core_wq);
 err_debug:
 	mlx5_unregister_debugfs();
 	return err;
@@ -1215,8 +1411,6 @@
 	mlx5e_cleanup();
 #endif
 	pci_unregister_driver(&mlx5_core_driver);
-	mlx5_health_cleanup();
-	destroy_workqueue(mlx5_core_wq);
 	mlx5_unregister_debugfs();
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 566a704..cee5b7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -43,25 +43,25 @@
 
 extern int mlx5_core_debug_mask;
 
-#define mlx5_core_dbg(dev, format, ...)					\
-	pr_debug("%s:%s:%d:(pid %d): " format,				\
-		 (dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_dbg(__dev, format, ...)				\
+	dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+		 (__dev)->priv.name, __func__, __LINE__, current->pid,	\
 		 ##__VA_ARGS__)
 
-#define mlx5_core_dbg_mask(dev, mask, format, ...)			\
+#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
 do {									\
 	if ((mask) & mlx5_core_debug_mask)				\
-		mlx5_core_dbg(dev, format, ##__VA_ARGS__);		\
+		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
 } while (0)
 
-#define mlx5_core_err(dev, format, ...)					\
-	pr_err("%s:%s:%d:(pid %d): " format,				\
-	       (dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_err(__dev, format, ...)				\
+	dev_err(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+	       (__dev)->priv.name, __func__, __LINE__, current->pid,	\
 	       ##__VA_ARGS__)
 
-#define mlx5_core_warn(dev, format, ...)				\
-	pr_warn("%s:%s:%d:(pid %d): " format,				\
-		(dev)->priv.name, __func__, __LINE__, current->pid,	\
+#define mlx5_core_warn(__dev, format, ...)				\
+	dev_warn(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format,	\
+		(__dev)->priv.name, __func__, __LINE__, current->pid,	\
 		##__VA_ARGS__)
 
 enum {
@@ -86,6 +86,10 @@
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+		     unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
 
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 76432a5..1cda5d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -493,15 +493,20 @@
 	struct fw_page *fwp;
 	struct rb_node *p;
 	int nclaimed = 0;
-	int err;
+	int err = 0;
 
 	do {
 		p = rb_first(&dev->priv.page_root);
 		if (p) {
 			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id,
-					    optimal_reclaimed_pages(),
-					    &nclaimed);
+			if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+				free_4k(dev, fwp->addr);
+				nclaimed = 1;
+			} else {
+				err = reclaim_pages(dev, fwp->func_id,
+						    optimal_reclaimed_pages(),
+						    &nclaimed);
+			}
 			if (err) {
 				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
 					       err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae30261..a87e773 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -302,7 +302,7 @@
 	u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
 
 	memset(in, 0, sizeof(in));
-	MLX5_SET(ptys_reg, in, local_port, local_port);
+	MLX5_SET(pvlc_reg, in, local_port, local_port);
 
 	return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
 				    pvlc_size, MLX5_REG_PVLC, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2941d9c..e36e122 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,3 +30,14 @@
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called mlxsw_switchx2.
+
+config MLXSW_SPECTRUM
+	tristate "Mellanox Technologies Spectrum support"
+	depends on MLXSW_CORE && NET_SWITCHDEV
+	default m
+	---help---
+	  This driver supports Mellanox Technologies Spectrum Ethernet
+	  Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 0a05f65..af01581 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -4,3 +4,6 @@
 mlxsw_pci-objs			:= pci.o
 obj-$(CONFIG_MLXSW_SWITCHX2)	+= mlxsw_switchx2.o
 mlxsw_switchx2-objs		:= switchx2.o
+obj-$(CONFIG_MLXSW_SPECTRUM)	+= mlxsw_spectrum.o
+mlxsw_spectrum-objs		:= spectrum.o spectrum_buffers.o \
+				   spectrum_switchdev.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 770db17..cd63b82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -464,6 +464,8 @@
  * passed in this command must be pinned.
  */
 
+#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
+
 static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
 				   char *in_mbox, u32 vpm_entries_count)
 {
@@ -568,7 +570,7 @@
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
 
-/* cmd_mbox_config_profile_set_fid_based
+/* cmd_mbox_config_profile_set_flood_mode
  * Capability bit. Setting a bit to 1 configures the profile
  * according to the mailbox contents.
  */
@@ -649,12 +651,8 @@
 MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
 
 /* cmd_mbox_config_profile_max_flood_tables
- * Maximum number of Flooding Tables. Flooding Tables are associated to
- * the different packet types for the different switch partitions.
- * Note that the table size depends on the fid_based mode.
- * In SwitchX silicon, tables are split equally between the switch
- * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
- * with swid-1 and the last 4 are associated with swid-2.
+ * Maximum number of single-entry flooding tables. Different flooding tables
+ * can be associated with different packet types.
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
 
@@ -665,15 +663,42 @@
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
 
-/* cmd_mbox_config_profile_fid_based
- * FID Based Flood Mode
- * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
- * 01 Use FID to offset the index to the Port Group Table (pgi)
- * 10 Use FID to offset the index to the Port Group Table (pgi) and
- * the Multicast ID
+/* cmd_mbox_config_profile_flood_mode
+ * Flooding mode to use.
+ * 0-2 - Backward compatible modes for SwitchX devices.
+ * 3 - Mixed mode, where:
+ * max_flood_tables indicates the number of single-entry tables.
+ * max_vid_flood_tables indicates the number of per-VID tables.
+ * max_fid_offset_flood_tables indicates the number of FID-offset tables.
+ * max_fid_flood_tables indicates the number of per-FID tables.
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
 
+/* cmd_mbox_config_profile_max_fid_offset_flood_tables
+ * Maximum number of FID-offset flooding tables.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     max_fid_offset_flood_tables, 0x34, 24, 4);
+
+/* cmd_mbox_config_profile_fid_offset_flood_table_size
+ * The size (number of entries) of each FID-offset flood table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     fid_offset_flood_table_size, 0x34, 0, 16);
+
+/* cmd_mbox_config_profile_max_fid_flood_tables
+ * Maximum number of per-FID flooding tables.
+ *
+ * Note: These flooding tables cover special FIDs only (vFIDs), i.e.
+ * FID values 4K and higher.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4);
+
+/* cmd_mbox_config_profile_fid_flood_table_size
+ * The size (number of entries) of each per-FID table.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16);
+
 /* cmd_mbox_config_profile_max_ib_mc
  * Maximum number of multicast FDB records for InfiniBand
  * FDB (in 512 chunks) per InfiniBand switch partition.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index dbcaf5d..97f0d93 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -287,7 +287,7 @@
 	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
 	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
 	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
-	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
 		mlxsw_emad_op_tlv_method_set(op_tlv,
 					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
 	else
@@ -362,7 +362,7 @@
 	char *op_tlv;
 
 	op_tlv = mlxsw_emad_op_tlv(skb);
-	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
 }
 
 #define MLXSW_EMAD_TIMEOUT_MS 200
@@ -374,26 +374,31 @@
 	int err;
 	int ret;
 
+	mlxsw_core->emad.trans_active = true;
+
 	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
 	if (err) {
 		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
 			mlxsw_core->emad.tid);
 		dev_kfree_skb(skb);
-		return err;
+		goto trans_inactive_out;
 	}
 
-	mlxsw_core->emad.trans_active = true;
 	ret = wait_event_timeout(mlxsw_core->emad.wait,
 				 !(mlxsw_core->emad.trans_active),
 				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
 	if (!ret) {
 		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
 			 mlxsw_core->emad.tid);
-		mlxsw_core->emad.trans_active = false;
-		return -EIO;
+		err = -EIO;
+		goto trans_inactive_out;
 	}
 
 	return 0;
+
+trans_inactive_out:
+	mlxsw_core->emad.trans_active = false;
+	return err;
 }
 
 static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
@@ -506,7 +511,6 @@
 		return err;
 
 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
-			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
 			    MLXSW_TRAP_ID_ETHEMAD);
 	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 }
@@ -551,8 +555,8 @@
 {
 	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 
+	mlxsw_core->emad.use_emad = false;
 	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
-			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
 			    MLXSW_TRAP_ID_ETHEMAD);
 	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 1658084..8078273 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -54,6 +54,7 @@
 	MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
 
 #define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum"
 
 struct mlxsw_core;
 struct mlxsw_driver;
@@ -153,6 +154,10 @@
 	u8	max_flood_tables;
 	u8	max_vid_flood_tables;
 	u8	flood_mode;
+	u8	max_fid_offset_flood_tables;
+	u16	fid_offset_flood_table_size;
+	u8	max_fid_flood_tables;
+	u16	fid_flood_table_size;
 	u16	max_ib_mc;
 	u16	max_pkey;
 	u8	ar_sec;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index ffd55d0..a94dbda6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -171,15 +171,21 @@
 }
 
 static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
-					    struct mlxsw_item *item)
+					    struct mlxsw_item *item,
+					    unsigned short index)
 {
-	memcpy(dst, &buf[item->offset], item->size.bytes);
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+	memcpy(dst, &buf[offset], item->size.bytes);
 }
 
-static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
-					  struct mlxsw_item *item)
+static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
+					  struct mlxsw_item *item,
+					  unsigned short index)
 {
-	memcpy(&buf[item->offset], src, item->size.bytes);
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+	memcpy(&buf[offset], src, item->size.bytes);
 }
 
 static inline u16
@@ -187,6 +193,7 @@
 {
 	u16 max_index, be_index;
 	u16 offset;		/* byte offset inside the array */
+	u8 in_byte_index;
 
 	BUG_ON(index && !item->element_size);
 	if (item->offset % sizeof(u32) != 0 ||
@@ -199,7 +206,8 @@
 	max_index = (item->size.bytes << 3) / item->element_size - 1;
 	be_index = max_index - index;
 	offset = be_index * item->element_size >> 3;
-	*shift = index % (BITS_PER_BYTE / item->element_size) << 1;
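+	/* Deriving the shift from the element size (rather than the
+	 * fixed 2-bit shift above) keeps bit arrays with other element
+	 * widths addressable.
+	 */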
+	in_byte_index  = index % (BITS_PER_BYTE / item->element_size);
+	*shift = in_byte_index * item->element_size;
 
 	return item->offset + offset;
 }
@@ -371,12 +379,40 @@
 static inline void								\
 mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)		\
 {										\
-	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+	__mlxsw_item_memcpy_from(buf, dst,					\
+				 &__ITEM_NAME(_type, _cname, _iname), 0);	\
 }										\
 static inline void								\
-mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)		\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)	\
 {										\
-	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));	\
+	__mlxsw_item_memcpy_to(buf, src,					\
+			       &__ITEM_NAME(_type, _cname, _iname), 0);		\
+}
+
+#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,	\
+			       _step, _instepoffset)				\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf,			\
+						  unsigned short index,		\
+						  char *dst)			\
+{										\
+	__mlxsw_item_memcpy_from(buf, dst,					\
+				 &__ITEM_NAME(_type, _cname, _iname), index);	\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,			\
+						unsigned short index,		\
+						const char *src)		\
+{										\
+	__mlxsw_item_memcpy_to(buf, src,					\
+			       &__ITEM_NAME(_type, _cname, _iname), index);	\
 }
 
 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 462cea3..de69e71 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -57,6 +57,7 @@
 
 static const struct pci_device_id mlxsw_pci_id_table[] = {
 	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
 	{0, }
 };
 
@@ -67,6 +68,8 @@
 	switch (id->device) {
 	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
 		return MLXSW_DEVICE_KIND_SWITCHX2;
+	case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
+		return MLXSW_DEVICE_KIND_SPECTRUM;
 	default:
 		BUG();
 	}
@@ -171,8 +174,8 @@
 	struct msix_entry msix_entry;
 	struct mlxsw_core *core;
 	struct {
-		u16 num_pages;
 		struct mlxsw_pci_mem_item *items;
+		unsigned int count;
 	} fw_area;
 	struct {
 		struct mlxsw_pci_mem_item out_mbox;
@@ -431,8 +434,7 @@
 
 	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
 	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
-		if (net_ratelimit())
-			dev_err(&pdev->dev, "failed to dma map tx frag\n");
+		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
 		return -EIO;
 	}
 	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
@@ -497,6 +499,7 @@
 			      struct mlxsw_pci_queue *q)
 {
 	struct mlxsw_pci_queue_elem_info *elem_info;
+	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
 	int i;
 	int err;
 
@@ -504,9 +507,9 @@
 	q->consumer_counter = 0;
 
 	/* Set CQ of same number of this RDQ with base
-	 * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
+	 * above SDQ count as the lower ones are assigned to SDQs.
 	 */
-	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
 	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
 	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
 		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -699,8 +702,8 @@
 put_new_skb:
 	memset(wqe, 0, q->elem_size);
 	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
-	if (err && net_ratelimit())
-		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+	if (err)
+		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
 	/* Everything is set up, ring doorbell to pass elem to HW */
 	q->producer_counter++;
 	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -830,7 +833,8 @@
 {
 	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
 	struct mlxsw_pci *mlxsw_pci = q->pci;
-	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
+	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
 	char *eqe;
 	u8 cqn;
 	bool cq_handle = false;
@@ -866,7 +870,7 @@
 
 	if (!cq_handle)
 		return;
-	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+	for_each_set_bit(cqn, active_cqns, cq_count) {
 		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
 		mlxsw_pci_queue_tasklet_schedule(q);
 	}
@@ -1067,10 +1071,8 @@
 	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
 	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
 
-	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
-	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
-	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
-	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+	if (num_sdqs + num_rdqs > num_cqs ||
+	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
 		dev_err(&pdev->dev, "Unsupported number of queues\n");
 		return -EINVAL;
 	}
@@ -1215,6 +1217,14 @@
 			mbox, profile->max_flood_tables);
 		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
 			mbox, profile->max_vid_flood_tables);
+		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
+			mbox, profile->max_fid_offset_flood_tables);
+		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
+			mbox, profile->fid_offset_flood_table_size);
+		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
+			mbox, profile->max_fid_flood_tables);
+		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
+			mbox, profile->fid_flood_table_size);
 	}
 	if (profile->used_flood_mode) {
 		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
@@ -1272,6 +1282,7 @@
 				  u16 num_pages)
 {
 	struct mlxsw_pci_mem_item *mem_item;
+	int nent = 0;
 	int i;
 	int err;
 
@@ -1279,7 +1290,7 @@
 					   GFP_KERNEL);
 	if (!mlxsw_pci->fw_area.items)
 		return -ENOMEM;
-	mlxsw_pci->fw_area.num_pages = num_pages;
+	mlxsw_pci->fw_area.count = num_pages;
 
 	mlxsw_cmd_mbox_zero(mbox);
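+	/* A single MAP_FA mailbox carries at most
+	 * MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries, so pages are mapped
+	 * in chunks below and the mailbox is zeroed between chunks.
+	 */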
 	for (i = 0; i < num_pages; i++) {
@@ -1293,13 +1304,22 @@
 			err = -ENOMEM;
 			goto err_alloc;
 		}
-		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
-		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
+		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
+		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
+			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+			if (err)
+				goto err_cmd_map_fa;
+			nent = 0;
+			mlxsw_cmd_mbox_zero(mbox);
+		}
 	}
 
-	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
-	if (err)
-		goto err_cmd_map_fa;
+	if (nent) {
+		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
+		if (err)
+			goto err_cmd_map_fa;
+	}
 
 	return 0;
 
@@ -1322,7 +1342,7 @@
 
 	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
 
-	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
 		mem_item = &mlxsw_pci->fw_area.items[i];
 
 		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
@@ -1582,11 +1602,11 @@
 
 	if (in_mbox)
 		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
-	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
-	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
 
-	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
-	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
 
 	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
 	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
@@ -1642,8 +1662,9 @@
 							   CIR_OUT_PARAM_LO));
 			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
 		}
-	} else if (!err && out_mbox)
+	} else if (!err && out_mbox) {
 		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+	}
 
 	mutex_unlock(&mlxsw_pci->cmd.lock);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 1ef9664..142f33d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -40,6 +40,7 @@
 #include "item.h"
 
 #define PCI_DEVICE_ID_MELLANOX_SWITCHX2	0xc738
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM	0xcb84
 #define MLXSW_PCI_BAR0_SIZE		(1024 * 1024) /* 1MB */
 #define MLXSW_PCI_PAGE_SIZE		4096
 
@@ -71,9 +72,7 @@
 #define MLXSW_PCI_DOORBELL(offset, type_offset, num)	\
 	((offset) + (type_offset) + (num) * 4)
 
-#define MLXSW_PCI_RDQS_COUNT	24
-#define MLXSW_PCI_SDQS_COUNT	24
-#define MLXSW_PCI_CQS_COUNT	(MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_CQS_MAX	96
 #define MLXSW_PCI_EQS_COUNT	2
 #define MLXSW_PCI_EQ_ASYNC_NUM	0
 #define MLXSW_PCI_EQ_COMP_NUM	1
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 096e1c1..236fb5d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -99,57 +99,6 @@
  */
 MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
 
-/* SMID - Switch Multicast ID
- * --------------------------
- * In multi-chip configuration, each device should maintain mapping between
- * Multicast ID (MID) into a list of local ports. This mapping is used in all
- * the devices other than the ingress device, and is implemented as part of the
- * FDB. The MID record maps from a MID, which is a unique identifier of the
- * multicast group within the stacking domain, into a list of local ports into
- * which the packet is replicated.
- */
-#define MLXSW_REG_SMID_ID 0x2007
-#define MLXSW_REG_SMID_LEN 0x420
-
-static const struct mlxsw_reg_info mlxsw_reg_smid = {
-	.id = MLXSW_REG_SMID_ID,
-	.len = MLXSW_REG_SMID_LEN,
-};
-
-/* reg_smid_swid
- * Switch partition ID.
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
-
-/* reg_smid_mid
- * Multicast identifier - global identifier that represents the multicast group
- * across all devices
- * Access: Index
- */
-MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
-
-/* reg_smid_port
- * Local port membership (1 bit per port).
- * Access: RW
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
-
-/* reg_smid_port_mask
- * Local port mask (1 bit per port).
- * Access: W
- */
-MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
-
-static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
-{
-	MLXSW_REG_ZERO(smid, payload);
-	mlxsw_reg_smid_swid_set(payload, 0);
-	mlxsw_reg_smid_mid_set(payload, mid);
-	mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
-	mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
-}
-
 /* SSPR - Switch System Port Record Register
  * -----------------------------------------
  * Configures the system port to local port mapping.
@@ -208,11 +157,359 @@
 	mlxsw_reg_sspr_system_port_set(payload, local_port);
 }
 
+/* SFDAT - Switch Filtering Database Aging Time
+ * --------------------------------------------
+ * Controls the Switch aging time. Aging time can be set per Switch
+ * Partition.
+ */
+#define MLXSW_REG_SFDAT_ID 0x2009
+#define MLXSW_REG_SFDAT_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdat = {
+	.id = MLXSW_REG_SFDAT_ID,
+	.len = MLXSW_REG_SFDAT_LEN,
+};
+
+/* reg_sfdat_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8);
+
+/* reg_sfdat_age_time
+ * Aging time in seconds
+ * Min - 10 seconds
+ * Max - 1,000,000 seconds
+ * Default is 300 seconds.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20);
+
+static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time)
+{
+	MLXSW_REG_ZERO(sfdat, payload);
+	mlxsw_reg_sfdat_swid_set(payload, 0);
+	mlxsw_reg_sfdat_age_time_set(payload, age_time);
+}
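+
+/* Usage sketch (for illustration): mlxsw_reg_sfdat_pack(payload, 300)
+ * selects the default 300 second aging time.
+ */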
+
+/* SFD - Switch Filtering Database
+ * -------------------------------
+ * The following register defines the access to the filtering database.
+ * The register supports querying, adding, removing and modifying the database.
+ * The access is optimized for bulk updates in which case more than one
+ * FDB record is present in the same command.
+ */
+#define MLXSW_REG_SFD_ID 0x200A
+#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFD_REC_MAX_COUNT 64
+#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN +	\
+			   MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfd = {
+	.id = MLXSW_REG_SFD_ID,
+	.len = MLXSW_REG_SFD_LEN,
+};
+
+/* reg_sfd_swid
+ * Switch partition ID for queries. Reserved on Write.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfd_op {
+	/* Dump entire FDB (processed according to record_locator) */
+	MLXSW_REG_SFD_OP_QUERY_DUMP = 0,
+	/* Query records by {MAC, VID/FID} value */
+	MLXSW_REG_SFD_OP_QUERY_QUERY = 1,
+	/* Query and clear activity. Query records by {MAC, VID/FID} value */
+	MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2,
+	/* Test. Response indicates if each of the records could be
+	 * added to the FDB.
+	 */
+	MLXSW_REG_SFD_OP_WRITE_TEST = 0,
+	/* Add/modify. Aged-out records cannot be added. This command removes
+	 * the learning notification of the {MAC, VID/FID}. Response includes
+	 * the entries that were added to the FDB.
+	 */
+	MLXSW_REG_SFD_OP_WRITE_EDIT = 1,
+	/* Remove record by {MAC, VID/FID}. This command also removes
+	 * the learning notification and aged-out notifications
+	 * of the {MAC, VID/FID}. The response provides current (pre-removal)
+	 * entries as non-aged-out.
+	 */
+	MLXSW_REG_SFD_OP_WRITE_REMOVE = 2,
+	/* Remove learned notification by {MAC, VID/FID}. The response provides
+	 * the removed learning notification.
+	 */
+	MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2,
+};
+
+/* reg_sfd_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2);
+
+/* reg_sfd_record_locator
+ * Used for querying the FDB. Use record_locator=0 to initiate the
+ * query. When a record is returned, a new record_locator is
+ * returned to be used in the subsequent query.
+ * Reserved for database update.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30);
+
+/* reg_sfd_num_rec
+ * Request: Number of records to read/add/modify/remove
+ * Response: Number of records read/added/replaced/removed
+ * See above description for more details.
+ * Ranges 0..64
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8);
+
+static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op,
+				      u32 record_locator)
+{
+	MLXSW_REG_ZERO(sfd, payload);
+	mlxsw_reg_sfd_op_set(payload, op);
+	mlxsw_reg_sfd_record_locator_set(payload, record_locator);
+}
+
+/* reg_sfd_rec_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8,
+		     MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_type {
+	MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0,
+};
+
+/* reg_sfd_rec_type
+ * FDB record type.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4,
+		     MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfd_rec_policy {
+	/* Replacement disabled, aging disabled. */
+	MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0,
+	/* (mlag remote): Replacement enabled, aging disabled,
+	 * learning notification enabled on this port.
+	 */
+	MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1,
+	/* (ingress device): Replacement enabled, aging enabled. */
+	MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3,
+};
+
+/* reg_sfd_rec_policy
+ * Policy.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2,
+		     MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_a
+ * Activity. Set for new static entries. Set for static entries if a frame SMAC
+ * lookup hits on the entry.
+ * To clear the a bit, use "query and clear activity" op.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1,
+		     MLXSW_REG_SFD_REC_LEN, 0x00, false);
+
+/* reg_sfd_rec_mac
+ * MAC address.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6,
+		       MLXSW_REG_SFD_REC_LEN, 0x02);
+
+enum mlxsw_reg_sfd_rec_action {
+	/* forward */
+	MLXSW_REG_SFD_REC_ACTION_NOP = 0,
+	/* forward and trap, trap_id is FDB_TRAP */
+	MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
+	/* trap and do not forward, trap_id is FDB_TRAP */
+	MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+	MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
+};
+
+/* reg_sfd_rec_action
+ * Action to apply on the packet.
+ * Note: Dynamic entries can only be configured with NOP action.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4,
+		     MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+/* reg_sfd_uc_sub_port
+ * VEPA channel on local port.
+ * Valid only if local port is a non-stacking port. Must be 0 if multichannel
+ * VEPA is not enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8,
+		     MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_fid_vid
+ * Filtering ID or VLAN ID
+ * For SwitchX and SwitchX-2:
+ * - Dynamic entries (policy 2,3) use FID
+ * - Static entries (policy 0) use VID
+ * - When independent learning is configured, VID=FID
+ * For Spectrum: use FID for both Dynamic and Static entries.
+ * VID should not be used.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+		     MLXSW_REG_SFD_REC_LEN, 0x08, false);
+
+/* reg_sfd_uc_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16,
+		     MLXSW_REG_SFD_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index,
+					 enum mlxsw_reg_sfd_rec_policy policy,
+					 const char *mac, u16 vid,
+					 enum mlxsw_reg_sfd_rec_action action,
+					 u8 local_port)
+{
+	u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload);
+
+	if (rec_index >= num_rec)
+		mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1);
+	mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0);
+	mlxsw_reg_sfd_rec_type_set(payload, rec_index,
+				   MLXSW_REG_SFD_REC_TYPE_UNICAST);
+	mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy);
+	mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac);
+	mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0);
+	mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid);
+	mlxsw_reg_sfd_rec_action_set(payload, rec_index, action);
+	mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port);
+}
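+
+/* Usage sketch (for illustration): a single static unicast entry could
+ * be packed as
+ *	mlxsw_reg_sfd_pack(payload, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
+ *	mlxsw_reg_sfd_uc_pack(payload, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
+ *			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, local_port);
+ * and then written to the device.
+ */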
+
+static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index,
+					   char *mac, u16 *p_vid,
+					   u8 *p_local_port)
+{
+	mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac);
+	*p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index);
+	*p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index);
+}
+
+/* SFN - Switch FDB Notification Register
+ * -------------------------------------------
+ * The switch provides notifications on newly learned FDB entries and
+ * aged out entries. The notifications can be polled by software.
+ */
+#define MLXSW_REG_SFN_ID 0x200B
+#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */
+#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */
+#define MLXSW_REG_SFN_REC_MAX_COUNT 64
+#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN +	\
+			   MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sfn = {
+	.id = MLXSW_REG_SFN_ID,
+	.len = MLXSW_REG_SFN_LEN,
+};
+
+/* reg_sfn_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8);
+
+/* reg_sfn_num_rec
+ * Request: Number of learned notifications and aged-out notification
+ * records requested.
+ * Response: Number of notification records returned (must be smaller
+ * than or equal to the value requested)
+ * Ranges 0..64
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8);
+
+static inline void mlxsw_reg_sfn_pack(char *payload)
+{
+	MLXSW_REG_ZERO(sfn, payload);
+	mlxsw_reg_sfn_swid_set(payload, 0);
+	mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT);
+}
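+
+/* Note: SFN is poll-driven; software periodically queries the register
+ * and walks the returned records, e.g. with mlxsw_reg_sfn_mac_unpack()
+ * defined below.
+ */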
+
+/* reg_sfn_rec_swid
+ * Switch partition ID.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8,
+		     MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+enum mlxsw_reg_sfn_rec_type {
+	/* MAC addresses learned on a regular port. */
+	MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5,
+	/* Aged-out MAC address on a regular port */
+	MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
+};
+
+/* reg_sfn_rec_type
+ * Notification record type.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4,
+		     MLXSW_REG_SFN_REC_LEN, 0x00, false);
+
+/* reg_sfn_rec_mac
+ * MAC address.
+ * Access: RO
+ */
+MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6,
+		       MLXSW_REG_SFN_REC_LEN, 0x02);
+
+/* reg_sfn_mac_sub_port
+ * VEPA channel on the local port.
+ * 0 if multichannel VEPA is not enabled.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8,
+		     MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_fid
+ * Filtering identifier.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+		     MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+/* reg_sfn_mac_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16,
+		     MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index,
+					    char *mac, u16 *p_vid,
+					    u8 *p_local_port)
+{
+	mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+	*p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+	*p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index);
+}
+
 /* SPMS - Switch Port MSTP/RSTP State Register
  * -------------------------------------------
  * Configures the spanning tree state of a physical port.
  */
-#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_ID 0x200D
 #define MLXSW_REG_SPMS_LEN 0x404
 
 static const struct mlxsw_reg_info mlxsw_reg_spms = {
@@ -243,20 +540,166 @@
  */
 MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
 
-static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
-				       enum mlxsw_reg_spms_state state)
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
 {
 	MLXSW_REG_ZERO(spms, payload);
 	mlxsw_reg_spms_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
+					   enum mlxsw_reg_spms_state state)
+{
 	mlxsw_reg_spms_state_set(payload, vid, state);
 }
 
+/* SPVID - Switch Port VID
+ * -----------------------
+ * The switch port VID configures the default VID for a port.
+ */
+#define MLXSW_REG_SPVID_ID 0x200E
+#define MLXSW_REG_SPVID_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spvid = {
+	.id = MLXSW_REG_SPVID_ID,
+	.len = MLXSW_REG_SPVID_LEN,
+};
+
+/* reg_spvid_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
+
+/* reg_spvid_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+
+/* reg_spvid_pvid
+ * Port default VID
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
+
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+{
+	MLXSW_REG_ZERO(spvid, payload);
+	mlxsw_reg_spvid_local_port_set(payload, local_port);
+	mlxsw_reg_spvid_pvid_set(payload, pvid);
+}
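+
+/* Usage sketch (for illustration): mlxsw_reg_spvid_pack(payload,
+ * local_port, 1) sets the port PVID to the default VLAN 1.
+ */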
+
+/* SPVM - Switch Port VLAN Membership
+ * ----------------------------------
+ * The Switch Port VLAN Membership register configures the VLAN membership
+ * of a port in a VLAN denoted by VID. VLAN membership is managed per
+ * virtual port. The register can be used to add and remove VID(s) from a port.
+ */
+#define MLXSW_REG_SPVM_ID 0x200F
+#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN +	\
+		    MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvm = {
+	.id = MLXSW_REG_SPVM_ID,
+	.len = MLXSW_REG_SPVM_LEN,
+};
+
+/* reg_spvm_pt
+ * Priority tagged. If this bit is set, packets forwarded to the port with
+ * untagged VLAN membership (u bit is set) will be tagged with priority tag
+ * (VID=0)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1);
+
+/* reg_spvm_pte
+ * Priority Tagged Update Enable. On Write operations, if this bit is cleared,
+ * the pt bit will NOT be updated. To update the pt bit, pte must be set.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1);
+
+/* reg_spvm_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8);
+
+/* reg_spvm_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8);
+
+/* reg_spvm_num_rec
+ * Number of records to update. Each record contains: i, e, u, vid.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8);
+
+/* reg_spvm_rec_i
+ * Ingress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_i,
+		     MLXSW_REG_SPVM_BASE_LEN, 14, 1,
+		     MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_e
+ * Egress membership in VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_e,
+		     MLXSW_REG_SPVM_BASE_LEN, 13, 1,
+		     MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_u
+ * Untagged - port is an untagged member - egress transmission uses untagged
+ * frames on VID<n>
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_u,
+		     MLXSW_REG_SPVM_BASE_LEN, 12, 1,
+		     MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+/* reg_spvm_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid,
+		     MLXSW_REG_SPVM_BASE_LEN, 0, 12,
+		     MLXSW_REG_SPVM_REC_LEN, 0, false);
+
+static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
+				       u16 vid_begin, u16 vid_end,
+				       bool is_member, bool untagged)
+{
+	int size = vid_end - vid_begin + 1;
+	int i;
+
+	MLXSW_REG_ZERO(spvm, payload);
+	mlxsw_reg_spvm_local_port_set(payload, local_port);
+	mlxsw_reg_spvm_num_rec_set(payload, size);
+
+	for (i = 0; i < size; i++) {
+		mlxsw_reg_spvm_rec_i_set(payload, i, is_member);
+		mlxsw_reg_spvm_rec_e_set(payload, i, is_member);
+		mlxsw_reg_spvm_rec_u_set(payload, i, untagged);
+		mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i);
+	}
+}
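+
+/* Usage sketch (for illustration): adding a port as an untagged member
+ * of VIDs 1..10 in both directions would be
+ *	mlxsw_reg_spvm_pack(payload, local_port, 1, 10, true, true);
+ */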
+
 /* SFGC - Switch Flooding Group Configuration
  * ------------------------------------------
  * The following register controls the association of flooding tables and MIDs
  * to packet types used for flooding.
  */
-#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_ID 0x2011
 #define MLXSW_REG_SFGC_LEN 0x10
 
 static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
@@ -265,13 +708,15 @@
 };
 
 enum mlxsw_reg_sfgc_type {
-	MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
-	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
-	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
-	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
-	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
-	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
-	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+	MLXSW_REG_SFGC_TYPE_BROADCAST,
+	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+	MLXSW_REG_SFGC_TYPE_RESERVED,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
+	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
+	MLXSW_REG_SFGC_TYPE_MAX,
 };
 
 /* reg_sfgc_type
@@ -408,7 +853,7 @@
 				       unsigned int flood_table,
 				       unsigned int index,
 				       enum mlxsw_flood_table_type table_type,
-				       unsigned int range)
+				       unsigned int range, u8 port, bool set)
 {
 	MLXSW_REG_ZERO(sftr, payload);
 	mlxsw_reg_sftr_swid_set(payload, 0);
@@ -416,8 +861,8 @@
 	mlxsw_reg_sftr_index_set(payload, index);
 	mlxsw_reg_sftr_table_type_set(payload, table_type);
 	mlxsw_reg_sftr_range_set(payload, range);
-	mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
-	mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_sftr_port_set(payload, port, set);
+	mlxsw_reg_sftr_port_mask_set(payload, port, 1);
 }
 
 /* SPMLR - Switch Port MAC Learning Register
@@ -473,6 +918,285 @@
 	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
 }
 
+/* SVFA - Switch VID to FID Allocation Register
+ * --------------------------------------------
+ * Controls the VID to FID mapping and {Port, VID} to FID mapping for
+ * virtualized ports.
+ */
+#define MLXSW_REG_SVFA_ID 0x201C
+#define MLXSW_REG_SVFA_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_svfa = {
+	.id = MLXSW_REG_SVFA_ID,
+	.len = MLXSW_REG_SVFA_LEN,
+};
+
+/* reg_svfa_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8);
+
+/* reg_svfa_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_svfa_mt {
+	MLXSW_REG_SVFA_MT_VID_TO_FID,
+	MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+};
+
+/* reg_svfa_mapping_table
+ * Mapping table:
+ * 0 - VID to FID
+ * 1 - {Port, VID} to FID
+ * Access: Index
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3);
+
+/* reg_svfa_v
+ * Valid.
+ * Valid if set.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1);
+
+/* reg_svfa_fid
+ * Filtering ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16);
+
+/* reg_svfa_vid
+ * VLAN ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12);
+
+/* reg_svfa_counter_set_type
+ * Counter set type for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8);
+
+/* reg_svfa_counter_index
+ * Counter index for flow counters.
+ * Access: RW
+ *
+ * Note: Reserved for SwitchX-2.
+ */
+MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24);
+
+static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port,
+				       enum mlxsw_reg_svfa_mt mt, bool valid,
+				       u16 fid, u16 vid)
+{
+	MLXSW_REG_ZERO(svfa, payload);
+	local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port;
+	mlxsw_reg_svfa_swid_set(payload, 0);
+	mlxsw_reg_svfa_local_port_set(payload, local_port);
+	mlxsw_reg_svfa_mapping_table_set(payload, mt);
+	mlxsw_reg_svfa_v_set(payload, valid);
+	mlxsw_reg_svfa_fid_set(payload, fid);
+	mlxsw_reg_svfa_vid_set(payload, vid);
+}
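+
+/* Usage sketch (for illustration): mapping VID 10 to FID 10 in the
+ * global table would be
+ *	mlxsw_reg_svfa_pack(payload, 0, MLXSW_REG_SVFA_MT_VID_TO_FID,
+ *			    true, 10, 10);
+ * where local_port is ignored for the VID-to-FID table.
+ */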
+
+/* SVPE - Switch Virtual-Port Enabling Register
+ * --------------------------------------------
+ * Enables port virtualization.
+ */
+#define MLXSW_REG_SVPE_ID 0x201E
+#define MLXSW_REG_SVPE_LEN 0x4
+
+static const struct mlxsw_reg_info mlxsw_reg_svpe = {
+	.id = MLXSW_REG_SVPE_ID,
+	.len = MLXSW_REG_SVPE_LEN,
+};
+
+/* reg_svpe_local_port
+ * Local port number
+ * Access: Index
+ *
+ * Note: CPU port is not supported (uses VLAN mode only).
+ */
+MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8);
+
+/* reg_svpe_vp_en
+ * Virtual port enable.
+ * 0 - Disable, VLAN mode (VID to FID).
+ * 1 - Enable, Virtual port mode ({Port, VID} to FID).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1);
+
+static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port,
+				       bool enable)
+{
+	MLXSW_REG_ZERO(svpe, payload);
+	mlxsw_reg_svpe_local_port_set(payload, local_port);
+	mlxsw_reg_svpe_vp_en_set(payload, enable);
+}
+
+/* SFMR - Switch FID Management Register
+ * -------------------------------------
+ * Creates and configures FIDs.
+ */
+#define MLXSW_REG_SFMR_ID 0x201F
+#define MLXSW_REG_SFMR_LEN 0x18
+
+static const struct mlxsw_reg_info mlxsw_reg_sfmr = {
+	.id = MLXSW_REG_SFMR_ID,
+	.len = MLXSW_REG_SFMR_LEN,
+};
+
+enum mlxsw_reg_sfmr_op {
+	MLXSW_REG_SFMR_OP_CREATE_FID,
+	MLXSW_REG_SFMR_OP_DESTROY_FID,
+};
+
+/* reg_sfmr_op
+ * Operation.
+ * 0 - Create or edit FID.
+ * 1 - Destroy FID.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4);
+
+/* reg_sfmr_fid
+ * Filtering ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16);
+
+/* reg_sfmr_fid_offset
+ * FID offset.
+ * Used to point into the flooding table selected by SFGC register if
+ * the table is of type FID-Offset. Otherwise, this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16);
+
+/* reg_sfmr_vtfp
+ * Valid Tunnel Flood Pointer.
+ * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1);
+
+/* reg_sfmr_nve_tunnel_flood_ptr
+ * Underlay Flooding and BC Pointer.
+ * Used as a pointer to the first entry of the group-based linked lists
+ * of flooding or BC entries (for NVE tunnels).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24);
+
+/* reg_sfmr_vv
+ * VNI Valid.
+ * If not set, then vni is reserved.
+ * Access: RW
+ *
+ * Note: Reserved for 802.1Q FIDs.
+ */
+MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1);
+
+/* reg_sfmr_vni
+ * Virtual Network Identifier.
+ * Access: RW
+ *
+ * Note: A given VNI can only be assigned to one FID.
+ */
+MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24);
+
+static inline void mlxsw_reg_sfmr_pack(char *payload,
+				       enum mlxsw_reg_sfmr_op op, u16 fid,
+				       u16 fid_offset)
+{
+	MLXSW_REG_ZERO(sfmr, payload);
+	mlxsw_reg_sfmr_op_set(payload, op);
+	mlxsw_reg_sfmr_fid_set(payload, fid);
+	mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset);
+	mlxsw_reg_sfmr_vtfp_set(payload, false);
+	mlxsw_reg_sfmr_vv_set(payload, false);
+}
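+
+/* Example (illustrative sketch, not part of this patch; core and fid are
+ * caller-provided): create a FID with no flooding table offset, and later
+ * destroy it again.
+ *
+ *	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+ *
+ *	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
+ *	mlxsw_reg_write(core, MLXSW_REG(sfmr), sfmr_pl);
+ *	...
+ *	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
+ *	mlxsw_reg_write(core, MLXSW_REG(sfmr), sfmr_pl);
+ */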
+
+/* SPVMLR - Switch Port VLAN MAC Learning Register
+ * -----------------------------------------------
+ * Controls the switch MAC learning policy per {Port, VID}.
+ */
+#define MLXSW_REG_SPVMLR_ID 0x2020
+#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
+#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
+			      MLXSW_REG_SPVMLR_REC_LEN * \
+			      MLXSW_REG_SPVMLR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_spvmlr = {
+	.id = MLXSW_REG_SPVMLR_ID,
+	.len = MLXSW_REG_SPVMLR_LEN,
+};
+
+/* reg_spvmlr_local_port
+ * Local ingress port.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8);
+
+/* reg_spvmlr_num_rec
+ * Number of records to update.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8);
+
+/* reg_spvmlr_rec_learn_enable
+ * 0 - Disable learning for {Port, VID}.
+ * 1 - Enable learning for {Port, VID}.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN,
+		     31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+/* reg_spvmlr_rec_vid
+ * VLAN ID to be added/removed from port or for querying.
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12,
+		     MLXSW_REG_SPVMLR_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
+					 u16 vid_begin, u16 vid_end,
+					 bool learn_enable)
+{
+	int num_rec = vid_end - vid_begin + 1;
+	int i;
+
+	WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT);
+
+	MLXSW_REG_ZERO(spvmlr, payload);
+	mlxsw_reg_spvmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spvmlr_num_rec_set(payload, num_rec);
+
+	for (i = 0; i < num_rec; i++) {
+		mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable);
+		mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i);
+	}
+}
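+
+/* Example (illustrative sketch, not part of this patch; core and
+ * local_port are caller-provided, err is the usual int): disable learning
+ * for VIDs 10..19 on a port. The register is too large for the stack,
+ * hence the allocation.
+ *
+ *	char *spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+ *
+ *	if (!spvmlr_pl)
+ *		return -ENOMEM;
+ *	mlxsw_reg_spvmlr_pack(spvmlr_pl, local_port, 10, 19, false);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(spvmlr), spvmlr_pl);
+ *	kfree(spvmlr_pl);
+ */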
+
 /* PMLP - Ports Module to Local Port Register
  * ------------------------------------------
  * Configures the assignment of modules to local ports.
@@ -1008,12 +1732,88 @@
 	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
 }
 
+/* PBMC - Port Buffer Management Control Register
+ * ----------------------------------------------
+ * The PBMC register configures and retrieves the port packet buffer
+ * allocation for different Prios, and the Pause threshold management.
+ */
+#define MLXSW_REG_PBMC_ID 0x500C
+#define MLXSW_REG_PBMC_LEN 0x68
+
+static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
+	.id = MLXSW_REG_PBMC_ID,
+	.len = MLXSW_REG_PBMC_LEN,
+};
+
+/* reg_pbmc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8);
+
+/* reg_pbmc_xoff_timer_value
+ * When device generates a pause frame, it uses this value as the pause
+ * timer (time for the peer port to pause in quota-512 bit time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
+
+/* reg_pbmc_xoff_refresh
+ * The time before a new pause frame should be sent to refresh the pause
+ * state, using the same units as xoff_timer_value above (in quota-512 bit
+ * time).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+
+/* reg_pbmc_buf_lossy
+ * The field indicates if the buffer is lossy.
+ * 0 - Lossless
+ * 1 - Lossy
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_epsb
+ * Eligible for Port Shared buffer.
+ * If epsb is set, packets assigned to the buffer are allowed to use the
+ * port shared buffer.
+ * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
+
+/* reg_pbmc_buf_size
+ * The part of the packet buffer array allocated for the specific buffer.
+ * Units are cells.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+
+static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
+				       u16 xoff_timer_value, u16 xoff_refresh)
+{
+	MLXSW_REG_ZERO(pbmc, payload);
+	mlxsw_reg_pbmc_local_port_set(payload, local_port);
+	mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value);
+	mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh);
+}
+
+static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
+						    int buf_index,
+						    u16 size)
+{
+	mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1);
+	mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+	mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+}
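+
+/* Example (illustrative sketch, not part of this patch; core, local_port
+ * and size_cells are caller-provided): set the pause timers and make
+ * buffer 0 lossy with the given size.
+ *
+ *	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ *
+ *	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0xffff, 0xffff / 2);
+ *	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, 0, size_cells);
+ *	mlxsw_reg_write(core, MLXSW_REG(pbmc), pbmc_pl);
+ */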
+
 /* PSPA - Port Switch Partition Allocation
  * ---------------------------------------
  * Controls the association of a port with a switch partition and enables
  * configuring ports as stacking ports.
  */
-#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_ID 0x500D
 #define MLXSW_REG_PSPA_LEN 0x8
 
 static const struct mlxsw_reg_info mlxsw_reg_pspa = {
@@ -1074,8 +1874,11 @@
  */
 MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
 
-#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD	0x0
-#define MLXSW_REG_HTGT_TRAP_GROUP_RX	0x1
+enum mlxsw_reg_htgt_trap_group {
+	MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+	MLXSW_REG_HTGT_TRAP_GROUP_RX,
+	MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
+};
 
 /* reg_htgt_trap_group
  * Trap group number. User defined number specifying which trap groups
@@ -1142,6 +1945,7 @@
 
 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD	0x15
 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX	0x14
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL	0x13
 
 /* reg_htgt_local_path_rdq
  * Receive descriptor queue (RDQ) to use for the trap group.
@@ -1149,21 +1953,29 @@
  */
 MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
 
-static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+static inline void mlxsw_reg_htgt_pack(char *payload,
+				       enum mlxsw_reg_htgt_trap_group group)
 {
 	u8 swid, rdq;
 
 	MLXSW_REG_ZERO(htgt, payload);
-	if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+	switch (group) {
+	case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
 		swid = MLXSW_PORT_SWID_ALL_SWIDS;
 		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
-	} else {
+		break;
+	case MLXSW_REG_HTGT_TRAP_GROUP_RX:
 		swid = 0;
 		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+		break;
+	case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
+		swid = 0;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
+		break;
 	}
 	mlxsw_reg_htgt_swid_set(payload, swid);
 	mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
-	mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+	mlxsw_reg_htgt_trap_group_set(payload, group);
 	mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
 	mlxsw_reg_htgt_pid_set(payload, 0);
 	mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
@@ -1254,17 +2066,290 @@
  */
 MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
 
-static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
-				       u8 trap_group, u16 trap_id)
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
 {
+	enum mlxsw_reg_htgt_trap_group trap_group;
+
 	MLXSW_REG_ZERO(hpkt, payload);
 	mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
 	mlxsw_reg_hpkt_action_set(payload, action);
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_ETHEMAD:
+	case MLXSW_TRAP_ID_PUDE:
+		trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
+		break;
+	default:
+		trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
+		break;
+	}
 	mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
 	mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
 	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
 }
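+
+/* Example (illustrative sketch, not part of this patch; core is
+ * caller-provided and the action and trap ID constants are assumed from
+ * the HPKT definitions above and from trap.h): trap STP packets to the
+ * CPU, letting the pack helper above pick the trap group.
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *			    MLXSW_TRAP_ID_STP);
+ *	mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */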
 
+/* SBPR - Shared Buffer Pools Register
+ * -----------------------------------
+ * The SBPR register configures and retrieves the shared buffer pools
+ * and their configuration.
+ */
+#define MLXSW_REG_SBPR_ID 0xB001
+#define MLXSW_REG_SBPR_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
+	.id = MLXSW_REG_SBPR_ID,
+	.len = MLXSW_REG_SBPR_LEN,
+};
+
+enum mlxsw_reg_sbpr_dir {
+	MLXSW_REG_SBPR_DIR_INGRESS,
+	MLXSW_REG_SBPR_DIR_EGRESS,
+};
+
+/* reg_sbpr_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2);
+
+/* reg_sbpr_pool
+ * Pool index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4);
+
+/* reg_sbpr_size
+ * Pool size in buffer cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24);
+
+enum mlxsw_reg_sbpr_mode {
+	MLXSW_REG_SBPR_MODE_STATIC,
+	MLXSW_REG_SBPR_MODE_DYNAMIC,
+};
+
+/* reg_sbpr_mode
+ * Pool quota calculation mode.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
+
+static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
+				       enum mlxsw_reg_sbpr_dir dir,
+				       enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+	MLXSW_REG_ZERO(sbpr, payload);
+	mlxsw_reg_sbpr_pool_set(payload, pool);
+	mlxsw_reg_sbpr_dir_set(payload, dir);
+	mlxsw_reg_sbpr_mode_set(payload, mode);
+	mlxsw_reg_sbpr_size_set(payload, size);
+}
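+
+/* Example (illustrative sketch, not part of this patch; core and
+ * pool_size_cells are caller-provided): size ingress pool 0 with static
+ * quota accounting.
+ *
+ *	char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ *
+ *	mlxsw_reg_sbpr_pack(sbpr_pl, 0, MLXSW_REG_SBPR_DIR_INGRESS,
+ *			    MLXSW_REG_SBPR_MODE_STATIC, pool_size_cells);
+ *	mlxsw_reg_write(core, MLXSW_REG(sbpr), sbpr_pl);
+ */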
+
+/* SBCM - Shared Buffer Class Management Register
+ * ----------------------------------------------
+ * The SBCM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-PG, including the binding to pool
+ * and definition of the associated quota.
+ */
+#define MLXSW_REG_SBCM_ID 0xB002
+#define MLXSW_REG_SBCM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbcm = {
+	.id = MLXSW_REG_SBCM_ID,
+	.len = MLXSW_REG_SBCM_LEN,
+};
+
+/* reg_sbcm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
+
+/* reg_sbcm_pg_buff
+ * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress)
+ * For PG buffer: range is 0..cap_max_pg_buffers - 1
+ * For traffic class: range is 0..cap_max_tclass - 1
+ * Note that when the traffic class is in MC aware mode, the MC aware
+ * traffic classes cannot be configured.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
+
+enum mlxsw_reg_sbcm_dir {
+	MLXSW_REG_SBCM_DIR_INGRESS,
+	MLXSW_REG_SBCM_DIR_EGRESS,
+};
+
+/* reg_sbcm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
+
+/* reg_sbcm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+
+/* reg_sbcm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
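+
+/* For example (illustrative note derived from the encoding above), a
+ * dynamic max_buff of 8 yields alpha = (1/128) * 2^(8-1) = 1, i.e. the
+ * quota tracks the pool's free space one-to-one.
+ */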
+
+/* reg_sbcm_pool
+ * Association of the port-pg/tclass to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
+				       enum mlxsw_reg_sbcm_dir dir,
+				       u32 min_buff, u32 max_buff, u8 pool)
+{
+	MLXSW_REG_ZERO(sbcm, payload);
+	mlxsw_reg_sbcm_local_port_set(payload, local_port);
+	mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff);
+	mlxsw_reg_sbcm_dir_set(payload, dir);
+	mlxsw_reg_sbcm_min_buff_set(payload, min_buff);
+	mlxsw_reg_sbcm_max_buff_set(payload, max_buff);
+	mlxsw_reg_sbcm_pool_set(payload, pool);
+}
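+
+/* Example (illustrative sketch, not part of this patch; core, local_port,
+ * min_cells and max_cells are caller-provided): bind ingress PG 0 to
+ * pool 0 with a static quota, so both limits are in cells.
+ *
+ *	char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ *
+ *	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, 0,
+ *			    MLXSW_REG_SBCM_DIR_INGRESS, min_cells, max_cells,
+ *			    0);
+ *	mlxsw_reg_write(core, MLXSW_REG(sbcm), sbcm_pl);
+ */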
+
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
+ * The SBPM register configures and retrieves the shared buffer allocation
+ * and configuration according to Port-Pool, including the definition
+ * of the associated quota.
+ */
+#define MLXSW_REG_SBPM_ID 0xB003
+#define MLXSW_REG_SBPM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbpm = {
+	.id = MLXSW_REG_SBPM_ID,
+	.len = MLXSW_REG_SBPM_LEN,
+};
+
+/* reg_sbpm_local_port
+ * Local port number.
+ * For Ingress: excludes CPU port and Router port
+ * For Egress: excludes IP Router
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
+
+/* reg_sbpm_pool
+ * The pool associated to quota counting on the local_port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
+
+enum mlxsw_reg_sbpm_dir {
+	MLXSW_REG_SBPM_DIR_INGRESS,
+	MLXSW_REG_SBPM_DIR_EGRESS,
+};
+
+/* reg_sbpm_dir
+ * Direction.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+
+/* reg_sbpm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
+
+/* reg_sbpm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
+
+static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
+				       enum mlxsw_reg_sbpm_dir dir,
+				       u32 min_buff, u32 max_buff)
+{
+	MLXSW_REG_ZERO(sbpm, payload);
+	mlxsw_reg_sbpm_local_port_set(payload, local_port);
+	mlxsw_reg_sbpm_pool_set(payload, pool);
+	mlxsw_reg_sbpm_dir_set(payload, dir);
+	mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
+	mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
+}
+
+/* SBMM - Shared Buffer Multicast Management Register
+ * --------------------------------------------------
+ * The SBMM register configures and retrieves the shared buffer allocation
+ * and configuration for MC packets according to Switch-Priority, including
+ * the binding to pool and definition of the associated quota.
+ */
+#define MLXSW_REG_SBMM_ID 0xB004
+#define MLXSW_REG_SBMM_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_sbmm = {
+	.id = MLXSW_REG_SBMM_ID,
+	.len = MLXSW_REG_SBMM_LEN,
+};
+
+/* reg_sbmm_prio
+ * Switch Priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4);
+
+/* reg_sbmm_min_buff
+ * Minimum buffer size for the limiter, in cells.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24);
+
+/* reg_sbmm_max_buff
+ * When the pool associated with the port-pg/tclass is configured as
+ * static, the maximum buffer size for the limiter, in cells.
+ * When the pool associated with the port-pg/tclass is configured as
+ * dynamic, max_buff holds the "alpha" parameter, supporting
+ * the following values:
+ * 0: 0
+ * i: (1/128)*2^(i-1), for i=1..14
+ * 0xFF: Infinity
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24);
+
+/* reg_sbmm_pool
+ * Association of the switch priority to a pool.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4);
+
+static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
+				       u32 max_buff, u8 pool)
+{
+	MLXSW_REG_ZERO(sbmm, payload);
+	mlxsw_reg_sbmm_prio_set(payload, prio);
+	mlxsw_reg_sbmm_min_buff_set(payload, min_buff);
+	mlxsw_reg_sbmm_max_buff_set(payload, max_buff);
+	mlxsw_reg_sbmm_pool_set(payload, pool);
+}
+
 static inline const char *mlxsw_reg_id_str(u16 reg_id)
 {
 	switch (reg_id) {
@@ -1272,18 +2357,34 @@
 		return "SGCR";
 	case MLXSW_REG_SPAD_ID:
 		return "SPAD";
-	case MLXSW_REG_SMID_ID:
-		return "SMID";
 	case MLXSW_REG_SSPR_ID:
 		return "SSPR";
+	case MLXSW_REG_SFDAT_ID:
+		return "SFDAT";
+	case MLXSW_REG_SFD_ID:
+		return "SFD";
+	case MLXSW_REG_SFN_ID:
+		return "SFN";
 	case MLXSW_REG_SPMS_ID:
 		return "SPMS";
+	case MLXSW_REG_SPVID_ID:
+		return "SPVID";
+	case MLXSW_REG_SPVM_ID:
+		return "SPVM";
 	case MLXSW_REG_SFGC_ID:
 		return "SFGC";
 	case MLXSW_REG_SFTR_ID:
 		return "SFTR";
 	case MLXSW_REG_SPMLR_ID:
 		return "SPMLR";
+	case MLXSW_REG_SVFA_ID:
+		return "SVFA";
+	case MLXSW_REG_SVPE_ID:
+		return "SVPE";
+	case MLXSW_REG_SFMR_ID:
+		return "SFMR";
+	case MLXSW_REG_SPVMLR_ID:
+		return "SPVMLR";
 	case MLXSW_REG_PMLP_ID:
 		return "PMLP";
 	case MLXSW_REG_PMTU_ID:
@@ -1296,12 +2397,22 @@
 		return "PAOS";
 	case MLXSW_REG_PPCNT_ID:
 		return "PPCNT";
+	case MLXSW_REG_PBMC_ID:
+		return "PBMC";
 	case MLXSW_REG_PSPA_ID:
 		return "PSPA";
 	case MLXSW_REG_HTGT_ID:
 		return "HTGT";
 	case MLXSW_REG_HPKT_ID:
 		return "HPKT";
+	case MLXSW_REG_SBPR_ID:
+		return "SBPR";
+	case MLXSW_REG_SBCM_ID:
+		return "SBCM";
+	case MLXSW_REG_SBPM_ID:
+		return "SBPM";
+	case MLXSW_REG_SBMM_ID:
+		return "SBMM";
 	default:
 		return "*UNKNOWN*";
 	}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
new file mode 100644
index 0000000..3be4a23
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -0,0 +1,1949 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+	return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					  bool is_up)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+			    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+	return 0;
+}
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+	int err;
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+			    MLXSW_SP_VFID_BASE + vfid, 0);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+
+	if (err)
+		return err;
+
+	set_bit(vfid, mlxsw_sp->active_vfids);
+	return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	clear_bit(vfid, mlxsw_sp->active_vfids);
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+			    MLXSW_SP_VFID_BASE + vfid, 0);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				      unsigned char *addr)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+	ether_addr_copy(addr, mlxsw_sp->base_mac);
+	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char *spms_pl;
+	int err;
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+	int max_mtu;
+	int err;
+
+	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+	if (err)
+		return err;
+	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+	if (mtu > max_mtu)
+		return -EINVAL;
+
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				     bool enable)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+				 u16 vid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+			    fid, vid);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					  u16 vid, bool learn_enable)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char *spvmlr_pl;
+	int err;
+
+	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
+	if (!spvmlr_pl)
+		return -ENOMEM;
+	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
+			      learn_enable);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
+	kfree(spvmlr_pl);
+	return err;
+}
+
+static int
+mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
+				      bool *p_usable)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+	int err;
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		return err;
+	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+	return 0;
+}
+
+static int mlxsw_sp_port_open(struct net_device *dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+	if (err)
+		return err;
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int mlxsw_sp_port_stop(struct net_device *dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+}
+
+static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+	const struct mlxsw_tx_info tx_info = {
+		.local_port = mlxsw_sp_port->local_port,
+		.is_emad = false,
+	};
+	u64 len;
+	int err;
+
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+		return NETDEV_TX_BUSY;
+
+	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+		struct sk_buff *skb_orig = skb;
+
+		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb) {
+			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+			dev_kfree_skb_any(skb_orig);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	if (eth_skb_pad(skb)) {
+		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+		return NETDEV_TX_OK;
+	}
+
+	mlxsw_sp_txhdr_construct(skb, &tx_info);
+	len = skb->len;
+	/* Due to a race we might fail here because of a full queue. In that
+	 * unlikely case we simply drop the packet.
+	 */
+	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+
+	if (!err) {
+		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
+		dev_kfree_skb_any(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
+	if (err)
+		return err;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	return 0;
+}
+
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+	if (err)
+		return err;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+			  struct rtnl_link_stats64 *stats)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp_port_pcpu_stats *p;
+	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+	u32 tx_dropped = 0;
+	unsigned int start;
+	int i;
+
+	for_each_possible_cpu(i) {
+		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rx_packets	= p->rx_packets;
+			rx_bytes	= p->rx_bytes;
+			tx_packets	= p->tx_packets;
+			tx_bytes	= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rx_packets;
+		stats->rx_bytes		+= rx_bytes;
+		stats->tx_packets	+= tx_packets;
+		stats->tx_bytes		+= tx_bytes;
+		/* tx_dropped is u32, updated without syncp protection. */
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->tx_dropped	= tx_dropped;
+	return stats;
+}
+
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+			   u16 vid_end, bool is_member, bool untagged)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char *spvm_pl;
+	int err;
+
+	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
+	if (!spvm_pl)
+		return -ENOMEM;
+
+	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port,	vid_begin,
+			    vid_end, is_member, untagged);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
+	kfree(spvm_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+	u16 vid, last_visited_vid;
+	int err;
+
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
+						   vid);
+		if (err) {
+			last_visited_vid = vid;
+			goto err_port_vid_to_fid_set;
+		}
+	}
+
+	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
+	if (err) {
+		last_visited_vid = VLAN_N_VID;
+		goto err_port_vid_to_fid_set;
+	}
+
+	return 0;
+
+err_port_vid_to_fid_set:
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
+					     vid);
+	return err;
+}
+
+static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+	u16 vid;
+	int err;
+
+	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
+	if (err)
+		return err;
+
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
+						   vid, vid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+			  u16 vid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char *sftr_pl;
+	int err;
+
+	/* VLAN 0 is added to HW filter when device goes up, but it is
+	 * reserved in our case, so simply return.
+	 */
+	if (!vid)
+		return 0;
+
+	if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+		netdev_warn(dev, "VID=%d already configured\n", vid);
+		return 0;
+	}
+
+	if (!test_bit(vid, mlxsw_sp->active_vfids)) {
+		err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+		if (err) {
+			netdev_err(dev, "Failed to create vFID=%d\n",
+				   MLXSW_SP_VFID_BASE + vid);
+			return err;
+		}
+
+		sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+		if (!sftr_pl) {
+			err = -ENOMEM;
+			goto err_flood_table_alloc;
+		}
+		mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
+				    MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
+				    MLXSW_PORT_CPU_PORT, true);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+		kfree(sftr_pl);
+		if (err) {
+			netdev_err(dev, "Failed to configure flood table\n");
+			goto err_flood_table_config;
+		}
+	}
+
+	/* In case we fail in the following steps, we intentionally do not
+	 * destroy the associated vFID.
+	 */
+
+	/* When adding the first VLAN interface on a bridged port we need to
+	 * transition all the active 802.1Q bridge VLANs to use explicit
+	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
+	 */
+	if (!mlxsw_sp_port->nr_vfids) {
+		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
+		if (err) {
+			netdev_err(dev, "Failed to set to Virtual mode\n");
+			return err;
+		}
+	}
+
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   true, MLXSW_SP_VFID_BASE + vid, vid);
+	if (err) {
+		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
+			   vid, MLXSW_SP_VFID_BASE + vid);
+		goto err_port_vid_to_fid_set;
+	}
+
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+	if (err) {
+		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+		goto err_port_vid_learning_set;
+	}
+
+	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+	if (err) {
+		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+			   vid);
+		goto err_port_add_vid;
+	}
+
+	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+					  MLXSW_REG_SPMS_STATE_FORWARDING);
+	if (err) {
+		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+		goto err_port_stp_state_set;
+	}
+
+	mlxsw_sp_port->nr_vfids++;
+	set_bit(vid, mlxsw_sp_port->active_vfids);
+
+	return 0;
+
+err_flood_table_config:
+err_flood_table_alloc:
+	mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
+	return err;
+
+err_port_stp_state_set:
+	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+err_port_add_vid:
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+err_port_vid_learning_set:
+	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+				     MLXSW_SP_VFID_BASE + vid, vid);
+err_port_vid_to_fid_set:
+	mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+	return err;
+}
+
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+			   __be16 __always_unused proto, u16 vid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err;
+
+	/* VLAN 0 is removed from HW filter when device goes down, but
+	 * it is reserved in our case, so simply return.
+	 */
+	if (!vid)
+		return 0;
+
+	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+		netdev_warn(dev, "VID=%d does not exist\n", vid);
+		return 0;
+	}
+
+	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+					  MLXSW_REG_SPMS_STATE_DISCARDING);
+	if (err) {
+		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
+		return err;
+	}
+
+	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+	if (err) {
+		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
+			   vid);
+		return err;
+	}
+
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+	if (err) {
+		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
+		return err;
+	}
+
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   false, MLXSW_SP_VFID_BASE + vid,
+					   vid);
+	if (err) {
+		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
+			   vid, MLXSW_SP_VFID_BASE + vid);
+		return err;
+	}
+
+	/* When removing the last VLAN interface on a bridged port we need to
+	 * transition all active 802.1Q bridge VLANs to use VID to FID
+	 * mappings and set the port's mode to VLAN mode.
+	 */
+	if (mlxsw_sp_port->nr_vfids == 1) {
+		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+		if (err) {
+			netdev_err(dev, "Failed to set to VLAN mode\n");
+			return err;
+		}
+	}
+
+	mlxsw_sp_port->nr_vfids--;
+	clear_bit(vid, mlxsw_sp_port->active_vfids);
+
+	return 0;
+}
+
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+	.ndo_open		= mlxsw_sp_port_open,
+	.ndo_stop		= mlxsw_sp_port_stop,
+	.ndo_start_xmit		= mlxsw_sp_port_xmit,
+	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
+	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
+	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
+	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
+	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
+};
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 mlxsw_sp->bus_info->fw_rev.major,
+		 mlxsw_sp->bus_info->fw_rev.minor,
+		 mlxsw_sp->bus_info->fw_rev.subminor);
+	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+		sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_port_hw_stats {
+	char str[ETH_GSTRING_LEN];
+	u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+	{
+		.str = "a_frames_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+	},
+	{
+		.str = "a_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+	},
+	{
+		.str = "a_frame_check_sequence_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+	},
+	{
+		.str = "a_alignment_errors",
+		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+	},
+	{
+		.str = "a_octets_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+	},
+	{
+		.str = "a_octets_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+	},
+	{
+		.str = "a_in_range_length_errors",
+		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+	},
+	{
+		.str = "a_out_of_range_length_field",
+		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+	},
+	{
+		.str = "a_frame_too_long_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+	},
+	{
+		.str = "a_symbol_error_during_carrier",
+		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+	},
+	{
+		.str = "a_mac_control_frames_transmitted",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+	},
+	{
+		.str = "a_mac_control_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+	},
+	{
+		.str = "a_unsupported_opcodes_received",
+		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_xmitted",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+	},
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+				      u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
+		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return MLXSW_SP_PORT_HW_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+struct mlxsw_sp_port_link_mode {
+	u32 mask;
+	u32 supported;
+	u32 advertised;
+	u32 speed;
+};
+
+static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+		.supported	= SUPPORTED_100baseT_Full,
+		.advertised	= ADVERTISED_100baseT_Full,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+		.supported	= SUPPORTED_1000baseKX_Full,
+		.advertised	= ADVERTISED_1000baseKX_Full,
+		.speed		= 1000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+		.supported	= SUPPORTED_10000baseT_Full,
+		.advertised	= ADVERTISED_10000baseT_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+		.supported	= SUPPORTED_10000baseKX4_Full,
+		.advertised	= ADVERTISED_10000baseKX4_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+		.supported	= SUPPORTED_10000baseKR_Full,
+		.advertised	= ADVERTISED_10000baseKR_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+		.supported	= SUPPORTED_20000baseKR2_Full,
+		.advertised	= ADVERTISED_20000baseKR2_Full,
+		.speed		= 20000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+		.supported	= SUPPORTED_40000baseCR4_Full,
+		.advertised	= ADVERTISED_40000baseCR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+		.supported	= SUPPORTED_40000baseKR4_Full,
+		.advertised	= ADVERTISED_40000baseKR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+		.supported	= SUPPORTED_40000baseSR4_Full,
+		.advertised	= ADVERTISED_40000baseSR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+		.supported	= SUPPORTED_40000baseLR4_Full,
+		.advertised	= ADVERTISED_40000baseLR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+		.speed		= 25000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+		.speed		= 50000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+		.supported	= SUPPORTED_56000baseKR4_Full,
+		.advertised	= ADVERTISED_56000baseKR4_Full,
+		.speed		= 56000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+		.speed		= 100000,
+	},
+};
+
+#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
+
+static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return SUPPORTED_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+		return SUPPORTED_Backplane;
+	return 0;
+}
+
+static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+			modes |= mlxsw_sp_port_link_mode[i].supported;
+	}
+	return modes;
+}
+
+static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
+			modes |= mlxsw_sp_port_link_mode[i].advertised;
+	}
+	return modes;
+}
+
+static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+					    struct ethtool_cmd *cmd)
+{
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+	int i;
+
+	if (!carrier_ok)
+		goto out;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
+			speed = mlxsw_sp_port_link_mode[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return PORT_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+		return PORT_DA;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static int mlxsw_sp_port_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_oper;
+	int err;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sp_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sp_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new = eth_proto_new & eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Not supported proto admin requested");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin");
+		return err;
+	}
+
+	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sp_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
+	.get_sset_count		= mlxsw_sp_port_get_sset_count,
+	.get_settings		= mlxsw_sp_port_get_settings,
+	.set_settings		= mlxsw_sp_port_set_settings,
+};
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sp_port = netdev_priv(dev);
+	mlxsw_sp_port->dev = dev;
+	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+	mlxsw_sp_port->local_port = local_port;
+	mlxsw_sp_port->learning = 1;
+	mlxsw_sp_port->learning_sync = 1;
+	mlxsw_sp_port->uc_flood = 1;
+	mlxsw_sp_port->pvid = 1;
+
+	mlxsw_sp_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+	if (!mlxsw_sp_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+			mlxsw_sp_port->local_port);
+		goto err_dev_addr_init;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+	 */
+	dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_module_check;
+	}
+
+	if (!usable) {
+		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+			mlxsw_sp_port->local_port);
+		goto port_not_usable;
+	}
+
+	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_system_port_mapping_set;
+	}
+
+	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_mtu_set;
+	}
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	if (err)
+		goto err_port_admin_status_set;
+
+	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_buffers_init;
+	}
+
+	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+			mlxsw_sp_port->local_port);
+		goto err_register_netdev;
+	}
+
+	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+	if (err)
+		goto err_port_vlan_init;
+
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+	return 0;
+
+err_port_vlan_init:
+	unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+	free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+	free_netdev(dev);
+	return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	u16 vfid;
+
+	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+	if (!mlxsw_sp_port)
+		return;
+	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+	free_percpu(mlxsw_sp_port->pcpu_stats);
+	free_netdev(mlxsw_sp_port->dev);
+}
+
+static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
+{
+	int i;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+		mlxsw_sp_port_remove(mlxsw_sp, i);
+	kfree(mlxsw_sp->ports);
+}
+
+static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
+{
+	size_t alloc_size;
+	int i;
+	int err;
+
+	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
+	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_sp->ports)
+		return -ENOMEM;
+
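+	/* Local port 0 is the CPU port, so front panel ports start at 1. */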
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+		err = mlxsw_sp_port_create(mlxsw_sp, i);
+		if (err)
+			goto err_port_create;
+	}
+	return 0;
+
+err_port_create:
+	for (i--; i >= 1; i--)
+		mlxsw_sp_port_remove(mlxsw_sp, i);
+	kfree(mlxsw_sp->ports);
+	return err;
+}
+
+static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
+				     char *pude_pl, void *priv)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	enum mlxsw_reg_pude_oper_status status;
+	u8 local_port;
+
+	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+	mlxsw_sp_port = mlxsw_sp->ports[local_port];
+	if (!mlxsw_sp_port) {
+		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+			 local_port);
+		return;
+	}
+
+	status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+		netdev_info(mlxsw_sp_port->dev, "link up\n");
+		netif_carrier_on(mlxsw_sp_port->dev);
+	} else {
+		netdev_info(mlxsw_sp_port->dev, "link down\n");
+		netif_carrier_off(mlxsw_sp_port->dev);
+	}
+}
+
+static struct mlxsw_event_listener mlxsw_sp_pude_event = {
+	.func = mlxsw_sp_pude_event_func,
+	.trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
+				   enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sp_pude_event;
+		break;
+	}
+	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+	if (err)
+		goto err_event_trap_set;
+
+	return 0;
+
+err_event_trap_set:
+	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+	return err;
+}
+
+static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
+				      enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sp_pude_event;
+		break;
+	}
+	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
+}
+
+static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
+				      void *priv)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+	if (unlikely(!mlxsw_sp_port)) {
+		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+				     local_port);
+		return;
+	}
+
+	skb->dev = mlxsw_sp_port->dev;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->rx_packets++;
+	pcpu_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_FDB_MC,
+	},
+	/* Traps for specific L2 packet types, not trapped as FDB MC */
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_STP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LACP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_EAPOL,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LLDP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MMRP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MVRP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_RPVST,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_DHCP,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+	},
+	{
+		.func = mlxsw_sp_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+	},
+};
+
+static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
+						      &mlxsw_sp_rx_listener[i],
+						      mlxsw_sp);
+		if (err)
+			goto err_rx_listener_register;
+
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+				    mlxsw_sp_rx_listener[i].trap_id);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+		if (err)
+			goto err_rx_trap_set;
+	}
+	return 0;
+
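+/* Unwind in reverse: the listener whose trap action failed to be set is
+ * only unregistered; earlier listeners also get their trap action
+ * restored to FORWARD before being unregistered.
+ */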
+err_rx_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+					  &mlxsw_sp_rx_listener[i],
+					  mlxsw_sp);
+err_rx_listener_register:
+	for (i--; i >= 0; i--) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    mlxsw_sp_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+						  &mlxsw_sp_rx_listener[i],
+						  mlxsw_sp);
+	}
+	return err;
+}
+
+static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    mlxsw_sp_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
+						  &mlxsw_sp_rx_listener[i],
+						  mlxsw_sp);
+	}
+}
+
+static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
+				 enum mlxsw_reg_sfgc_type type,
+				 enum mlxsw_reg_sfgc_bridge_type bridge_type)
+{
+	enum mlxsw_flood_table_type table_type;
+	enum mlxsw_sp_flood_table flood_table;
+	char sfgc_pl[MLXSW_REG_SFGC_LEN];
+
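+	/* vFIDs use a single per-FID flood table, while 802.1Q FIDs split
+	 * flooding by packet type: one table for unknown unicast and one
+	 * for broadcast and unregistered multicast.
+	 */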
+	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
+		flood_table = 0;
+	} else {
+		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+		if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+			flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+		else
+			flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+	}
+
+	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
+			    flood_table);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
+}
+
+static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
+{
+	int type, err;
+
+	/* For non-offloaded netdevs, flood all traffic types to CPU
+	 * port.
+	 */
+	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+			continue;
+
+		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
+		if (err)
+			return err;
+	}
+
+	/* For bridged ports, use one flooding table for unknown unicast
+	 * traffic and a second table for unregistered multicast and
+	 * broadcast.
+	 */
+	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
+		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
+			continue;
+
+		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
+					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+			 const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+	int err;
+
+	mlxsw_sp->core = mlxsw_core;
+	mlxsw_sp->bus_info = mlxsw_bus_info;
+
+	err = mlxsw_sp_base_mac_get(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
+		return err;
+	}
+
+	err = mlxsw_sp_ports_create(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+		goto err_ports_create;
+	}
+
+	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
+		goto err_event_register;
+	}
+
+	err = mlxsw_sp_traps_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
+		goto err_rx_listener_register;
+	}
+
+	err = mlxsw_sp_flood_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
+		goto err_flood_init;
+	}
+
+	err = mlxsw_sp_buffers_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
+		goto err_buffers_init;
+	}
+
+	err = mlxsw_sp_switchdev_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
+		goto err_switchdev_init;
+	}
+
+	return 0;
+
+err_switchdev_init:
+err_buffers_init:
+err_flood_init:
+	mlxsw_sp_traps_fini(mlxsw_sp);
+err_rx_listener_register:
+	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+	mlxsw_sp_ports_remove(mlxsw_sp);
+err_ports_create:
+	mlxsw_sp_vfids_fini(mlxsw_sp);
+	return err;
+}
+
+static void mlxsw_sp_fini(void *priv)
+{
+	struct mlxsw_sp *mlxsw_sp = priv;
+
+	mlxsw_sp_switchdev_fini(mlxsw_sp);
+	mlxsw_sp_traps_fini(mlxsw_sp);
+	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
+	mlxsw_sp_ports_remove(mlxsw_sp);
+	mlxsw_sp_vfids_fini(mlxsw_sp);
+}
+
+static struct mlxsw_config_profile mlxsw_sp_config_profile = {
+	.used_max_vepa_channels		= 1,
+	.max_vepa_channels		= 0,
+	.used_max_lag			= 1,
+	.max_lag			= 64,
+	.used_max_port_per_lag		= 1,
+	.max_port_per_lag		= 16,
+	.used_max_mid			= 1,
+	.max_mid			= 7000,
+	.used_max_pgt			= 1,
+	.max_pgt			= 0,
+	.used_max_system_port		= 1,
+	.max_system_port		= 64,
+	.used_max_vlan_groups		= 1,
+	.max_vlan_groups		= 127,
+	.used_max_regions		= 1,
+	.max_regions			= 400,
+	.used_flood_tables		= 1,
+	.used_flood_mode		= 1,
+	.flood_mode			= 3,
+	.max_fid_offset_flood_tables	= 2,
+	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
+	.max_fid_flood_tables		= 1,
+	.fid_flood_table_size		= VLAN_N_VID,
+	.used_max_ib_mc			= 1,
+	.max_ib_mc			= 0,
+	.used_max_pkey			= 1,
+	.max_pkey			= 0,
+	.swid_config			= {
+		{
+			.used_type	= 1,
+			.type		= MLXSW_PORT_SWID_TYPE_ETH,
+		}
+	},
+};
+
+static struct mlxsw_driver mlxsw_sp_driver = {
+	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
+	.owner			= THIS_MODULE,
+	.priv_size		= sizeof(struct mlxsw_sp),
+	.init			= mlxsw_sp_init,
+	.fini			= mlxsw_sp_fini,
+	.txhdr_construct	= mlxsw_sp_txhdr_construct,
+	.txhdr_len		= MLXSW_TXHDR_LEN,
+	.profile		= &mlxsw_sp_config_profile,
+};
+
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	int err;
+
+	/* When the port is not bridged, untagged packets are tagged with
+	 * PVID=VID=1, thereby creating an implicit VLAN interface in
+	 * the device. Remove it and let bridge code take care of its
+	 * own VLANs.
+	 */
+	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
+	if (err)
+		netdev_err(dev, "Failed to remove VID 1\n");
+
+	return err;
+}
+
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	int err;
+
+	/* Add implicit VLAN interface in the device, so that untagged
+	 * packets will be classified to the default vFID.
+	 */
+	err = mlxsw_sp_port_add_vid(dev, 0, 1);
+	if (err)
+		netdev_err(dev, "Failed to add VID 1\n");
+
+	return err;
+}
+
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+					 struct net_device *br_dev)
+{
+	return !mlxsw_sp->master_bridge.dev ||
+	       mlxsw_sp->master_bridge.dev == br_dev;
+}
+
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+				       struct net_device *br_dev)
+{
+	mlxsw_sp->master_bridge.dev = br_dev;
+	mlxsw_sp->master_bridge.ref_count++;
+}
+
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
+				       struct net_device *br_dev)
+{
+	if (--mlxsw_sp->master_bridge.ref_count == 0)
+		mlxsw_sp->master_bridge.dev = NULL;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+				    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct net_device *upper_dev;
+	struct mlxsw_sp *mlxsw_sp;
+	int err;
+
+	if (!mlxsw_sp_port_dev_check(dev))
+		return NOTIFY_DONE;
+
+	mlxsw_sp_port = netdev_priv(dev);
+	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	info = ptr;
+
+	switch (event) {
+	case NETDEV_PRECHANGEUPPER:
+		upper_dev = info->upper_dev;
+		/* A HW limitation forbids putting a port into multiple bridges. */
+		if (info->master && info->linking &&
+		    netif_is_bridge_master(upper_dev) &&
+		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+			return NOTIFY_BAD;
+		break;
+	case NETDEV_CHANGEUPPER:
+		upper_dev = info->upper_dev;
+		if (info->master &&
+		    netif_is_bridge_master(upper_dev)) {
+			if (info->linking) {
+				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+				if (err)
+					netdev_err(dev, "Failed to join bridge\n");
+				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+				mlxsw_sp_port->bridged = 1;
+			} else {
+				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+				if (err)
+					netdev_err(dev, "Failed to leave bridge\n");
+				mlxsw_sp_port->bridged = 0;
+				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+			}
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+	.notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+	int err;
+
+	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+	if (err)
+		goto err_core_driver_register;
+	return 0;
+
+err_core_driver_register:
+	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+	return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
new file mode 100644
index 0000000..4365c8b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -0,0 +1,122 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+
+#include "core.h"
+
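+/* vFIDs used for VLAN interfaces are allocated right above the 4K VID range. */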
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+	struct mlxsw_sp_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	unsigned char base_mac[ETH_ALEN];
+	struct {
+		struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+		unsigned int interval; /* ms */
+	} fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+	u32 ageing_time;
+	struct {
+		struct net_device *dev;
+		unsigned int ref_count;
+	} master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			tx_dropped;
+};
+
+struct mlxsw_sp_port {
+	struct net_device *dev;
+	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sp *mlxsw_sp;
+	u8 local_port;
+	u8 stp_state;
+	u8 learning:1,
+	   learning_sync:1,
+	   uc_flood:1,
+	   bridged:1;
+	u16 pvid;
+	/* 802.1Q bridge VLANs */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	/* VLAN interfaces */
+	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 nr_vfids;
+};
+
+enum mlxsw_sp_flood_table {
+	MLXSW_SP_FLOOD_TABLE_UC,
+	MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+				 u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+			   u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+			  u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+			   __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 0000000..d59195e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+	u8 index;
+	u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size)	\
+	{				\
+		.index = _index,	\
+		.size = _size,		\
+	}
+
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+	MLXSW_SP_PB(0, 208),
+	MLXSW_SP_PB(1, 208),
+	MLXSW_SP_PB(2, 208),
+	MLXSW_SP_PB(3, 208),
+	MLXSW_SP_PB(4, 208),
+	MLXSW_SP_PB(5, 208),
+	MLXSW_SP_PB(6, 208),
+	MLXSW_SP_PB(7, 208),
+	MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+	int i;
+
+	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+			    0xffff, 0xffff / 2);
+	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+		const struct mlxsw_sp_pb *pb;
+
+		pb = &mlxsw_sp_pbs[i];
+		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+	}
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+			       MLXSW_REG(pbmc), pbmc_pl);
+}
+
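+/* The shared buffer is managed in 96-byte cells, so all sizes below are
+ * expressed in cells rather than bytes.
+ */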
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+	u8 pool;
+	enum mlxsw_reg_sbpr_dir dir;
+	enum mlxsw_reg_sbpr_mode mode;
+	u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE				\
+	((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) /	\
+	 MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE				\
+	((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) /	\
+	 MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size)		\
+	{							\
+		.pool = _pool,					\
+		.dir = _dir,					\
+		.mode = _mode,					\
+		.size = _size,					\
+	}
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size)			\
+	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS,	\
+			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size)			\
+	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS,	\
+			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+	MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+	MLXSW_SP_SB_POOL_INGRESS(1, 0),
+	MLXSW_SP_SB_POOL_INGRESS(2, 0),
+	MLXSW_SP_SB_POOL_INGRESS(3, 0),
+	MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+	MLXSW_SP_SB_POOL_EGRESS(1, 0),
+	MLXSW_SP_SB_POOL_EGRESS(2, 0),
+	MLXSW_SP_SB_POOL_EGRESS(3, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+};
+
+#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+
+static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char sbpr_pl[MLXSW_REG_SBPR_LEN];
+	int i;
+	int err;
+
+	for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
+		const struct mlxsw_sp_sb_pool *pool;
+
+		pool = &mlxsw_sp_sb_pools[i];
+		mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
+				    pool->mode, pool->size);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+struct mlxsw_sp_sb_cm {
+	union {
+		u8 pg;
+		u8 tc;
+	} u;
+	enum mlxsw_reg_sbcm_dir dir;
+	u32 min_buff;
+	u32 max_buff;
+	u8 pool;
+};
+
+#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool)	\
+	{								\
+		.u.pg = _pg_tc,						\
+		.dir = _dir,						\
+		.min_buff = _min_buff,					\
+		.max_buff = _max_buff,					\
+		.pool = _pool,						\
+	}
+
+#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff)		\
+	MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS,			\
+		       _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff)		\
+	MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS,			\
+		       _min_buff, _max_buff, 0)
+
+#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc)				\
+	MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
+
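+/* Per-port CM quotas, in cells: ingress entries are keyed by priority
+ * group (PG), egress entries by traffic class (TC).
+ */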
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
+	MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
+	MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
+	MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
+	MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
+	MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
+	MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+};
+
+#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
+	MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+};
+
+#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
+	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
+
+static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+				const struct mlxsw_sp_sb_cm *cms,
+				size_t cms_len)
+{
+	char sbcm_pl[MLXSW_REG_SBCM_LEN];
+	int i;
+	int err;
+
+	for (i = 0; i < cms_len; i++) {
+		const struct mlxsw_sp_sb_cm *cm;
+
+		cm = &cms[i];
+		mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
+				    cm->min_buff, cm->max_buff, cm->pool);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+				    mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
+				    MLXSW_SP_SB_CMS_LEN);
+}
+
+static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
+{
+	return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
+				    MLXSW_SP_CPU_PORT_SB_CMS_LEN);
+}
+
+struct mlxsw_sp_sb_pm {
+	u8 pool;
+	enum mlxsw_reg_sbpm_dir dir;
+	u32 min_buff;
+	u32 max_buff;
+};
+
+#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff)	\
+	{							\
+		.pool = _pool,					\
+		.dir = _dir,					\
+		.min_buff = _min_buff,				\
+		.max_buff = _max_buff,				\
+	}
+
+#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff)	\
+	MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS,	\
+		       _min_buff, _max_buff)
+
+#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff)	\
+	MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS,	\
+		       _min_buff, _max_buff)
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
+	MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
+	MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
+	MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
+	MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
+	MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
+	MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
+	MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
+	MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+};
+
+#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	char sbpm_pl[MLXSW_REG_SBPM_LEN];
+	int i;
+	int err;
+
+	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+		const struct mlxsw_sp_sb_pm *pm;
+
+		pm = &mlxsw_sp_sb_pms[i];
+		mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
+				    pm->pool, pm->dir,
+				    pm->min_buff, pm->max_buff);
+		err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+				      MLXSW_REG(sbpm), sbpm_pl);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+struct mlxsw_sp_sb_mm {
+	u8 prio;
+	u32 min_buff;
+	u32 max_buff;
+	u8 pool;
+};
+
+#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool)	\
+	{							\
+		.prio = _prio,					\
+		.min_buff = _min_buff,				\
+		.max_buff = _max_buff,				\
+		.pool = _pool,					\
+	}
+
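+/* Multicast quotas, in cells, applied per switch priority (SBMM). */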
+static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
+	MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char sbmm_pl[MLXSW_REG_SBMM_LEN];
+	int i;
+	int err;
+
+	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+		const struct mlxsw_sp_sb_mm *mc;
+
+		mc = &mlxsw_sp_sb_mms[i];
+		mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+				    mc->max_buff, mc->pool);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
+
+	err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+	if (err)
+		return err;
+	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+	if (err)
+		return err;
+	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+	return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
+
+	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 0000000..617fb22
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,903 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+		       attr->u.ppid.id_len);
+		break;
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+		attr->u.brport_flags =
+			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				       u8 state)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	enum mlxsw_reg_spms_state spms_state;
+	char *spms_pl;
+	u16 vid;
+	int err;
+
+	switch (state) {
+	case BR_STATE_DISABLED: /* fall-through */
+	case BR_STATE_FORWARDING:
+		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+		break;
+	case BR_STATE_LISTENING: /* fall-through */
+	case BR_STATE_LEARNING:
+		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+		break;
+	case BR_STATE_BLOCKING:
+		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+		break;
+	default:
+		BUG();
+	}
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					    struct switchdev_trans *trans,
+					    u8 state)
+{
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	mlxsw_sp_port->stp_state = state;
+	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u16 fid_begin, u16 fid_end, bool set,
+				     bool only_uc)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u16 range = fid_end - fid_begin + 1;
+	char *sftr_pl;
+	int err;
+
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+			    mlxsw_sp_port->local_port, set);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+	if (err)
+		goto buffer_out;
+
+	/* Flooding control allows one to decide whether a given port will
+	 * flood unicast traffic for which there is no FDB entry.
+	 */
+	if (only_uc)
+		goto buffer_out;
+
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
+			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+			    mlxsw_sp_port->local_port, set);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+buffer_out:
+	kfree(sftr_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				      bool set)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	u16 vid, last_visited_vid;
+	int err;
+
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
+						true);
+		if (err) {
+			last_visited_vid = vid;
+			goto err_port_flood_set;
+		}
+	}
+
+	return 0;
+
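+/* Roll back the VLANs that were reconfigured before the failure. */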
+err_port_flood_set:
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
+		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+	netdev_err(dev, "Failed to configure unicast flooding\n");
+	return err;
+}
+
+static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					   struct switchdev_trans *trans,
+					   unsigned long brport_flags)
+{
+	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
+	bool set;
+	int err;
+
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
+		set = !mlxsw_sp_port->uc_flood;
+		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
+		if (err)
+			return err;
+	}
+
+	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
+	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
+	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
+
+	return 0;
+}
+
+static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
+{
+	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
+	int err;
+
+	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
+	if (err)
+		return err;
+	mlxsw_sp->ageing_time = ageing_time;
+	return 0;
+}
+
+static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					    struct switchdev_trans *trans,
+					    unsigned long ageing_clock_t)
+{
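+	/* The bridge passes the ageing time in clock_t units, while the
+	 * device expects seconds; convert via jiffies and milliseconds.
+	 */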
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
+}
+
+static int mlxsw_sp_port_attr_set(struct net_device *dev,
+				  const struct switchdev_attr *attr,
+				  struct switchdev_trans *trans)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+						       attr->u.stp_state);
+		break;
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+						      attr->u.brport_flags);
+		break;
+	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+						       attr->u.ageing_time);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char spvid_pl[MLXSW_REG_SPVID_LEN];
+
+	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
+}
+
+static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+	int err;
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+	if (err)
+		return err;
+
+	set_bit(fid, mlxsw_sp->active_fids);
+	return 0;
+}
+
+static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	clear_bit(fid, mlxsw_sp->active_fids);
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+			    fid, fid);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+	enum mlxsw_reg_svfa_mt mt;
+
+	if (mlxsw_sp_port->nr_vfids)
+		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+	else
+		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+
+	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
+{
+	enum mlxsw_reg_svfa_mt mt;
+
+	if (!mlxsw_sp_port->nr_vfids)
+		return 0;
+
+	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
+}
+
+static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
+				  u16 vid_end)
+{
+	u16 vid;
+	int err;
+
+	for (vid = vid_begin; vid <= vid_end; vid++) {
+		err = mlxsw_sp_port_add_vid(dev, 0, vid);
+		if (err)
+			goto err_port_add_vid;
+	}
+	return 0;
+
+err_port_add_vid:
+	for (vid--; vid >= vid_begin; vid--)
+		mlxsw_sp_port_kill_vid(dev, 0, vid);
+	return err;
+}
+
+static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u16 vid_begin, u16 vid_end,
+				     bool flag_untagged, bool flag_pvid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	enum mlxsw_reg_svfa_mt mt;
+	u16 vid, vid_e;
+	int err;
+
+	/* If this is invoked with BRIDGE_FLAGS_SELF and the port is
+	 * not bridged, then packets ingressing through the port with
+	 * the specified VIDs will be directed to the CPU.
+	 */
+	if (!mlxsw_sp_port->bridged)
+		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
+
+	for (vid = vid_begin; vid <= vid_end; vid++) {
+		if (!test_bit(vid, mlxsw_sp->active_fids)) {
+			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
+			if (err) {
+				netdev_err(dev, "Failed to create FID=%d\n",
+					   vid);
+				return err;
+			}
+
+			/* When creating a FID, we set a VID to FID mapping
+			 * regardless of the port's mode.
+			 */
+			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+							   true, vid, vid);
+			if (err) {
+				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+					   vid);
+				return err;
+			}
+		}
+
+		/* Set FID mapping according to port's mode */
+		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(dev, "Failed to map FID=%d", vid);
+			return err;
+		}
+	}
+
+	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+					true, false);
+	if (err) {
+		netdev_err(dev, "Failed to configure flooding\n");
+		return err;
+	}
+
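+	/* SPVM takes at most MLXSW_REG_SPVM_REC_MAX_COUNT VLAN records per
+	 * write, so program the range in chunks.
+	 */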
+	for (vid = vid_begin; vid <= vid_end;
+	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+			    vid_end);
+
+		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+					     flag_untagged);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+				   vid, vid_e);
+			return err;
+		}
+	}
+
+	vid = vid_begin;
+	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+				   vid);
+			return err;
+		}
+		mlxsw_sp_port->pvid = vid;
+	}
+
+	/* Change the activity bits only if the HW operation succeeded */
+	for (vid = vid_begin; vid <= vid_end; vid++)
+		set_bit(vid, mlxsw_sp_port->active_vlans);
+
+	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+					   mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				   const struct switchdev_obj_port_vlan *vlan,
+				   struct switchdev_trans *trans)
+{
+	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+					 vlan->vid_begin, vlan->vid_end,
+					 untagged_flag, pvid_flag);
+}
+
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+				const char *mac, u16 vid, bool adding,
+				bool dynamic)
+{
+	enum mlxsw_reg_sfd_rec_policy policy;
+	enum mlxsw_reg_sfd_op op;
+	char *sfd_pl;
+	int err;
+
+	if (!vid)
+		vid = mlxsw_sp_port->pvid;
+
+	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+	if (!sfd_pl)
+		return -ENOMEM;
+
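+	/* Learned entries are installed as dynamic so the device can age
+	 * them out; user-configured entries are static.
+	 */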
+	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+	op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
+		      MLXSW_REG_SFD_OP_WRITE_REMOVE;
+	mlxsw_reg_sfd_pack(sfd_pl, op, 0);
+	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
+			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
+			      mlxsw_sp_port->local_port);
+	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
+			      sfd_pl);
+	kfree(sfd_pl);
+
+	return err;
+}
+
+static int
+mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
+			     const struct switchdev_obj_port_fdb *fdb,
+			     struct switchdev_trans *trans)
+{
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+				    true, false);
+}
+
+static int mlxsw_sp_port_obj_add(struct net_device *dev,
+				 const struct switchdev_obj *obj,
+				 struct switchdev_trans *trans)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj),
+					      trans);
+		break;
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
+						   SWITCHDEV_OBJ_PORT_FDB(obj),
+						   trans);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
+				   u16 vid_end)
+{
+	u16 vid;
+	int err;
+
+	for (vid = vid_begin; vid <= vid_end; vid++) {
+		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u16 vid_begin, u16 vid_end, bool init)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	u16 vid, vid_e;
+	int err;
+
+	/* If this is invoked with BRIDGE_FLAGS_SELF and the port is
+	 * not bridged, then prevent packets ingressing through the
+	 * port with the specified VIDs from being trapped to the CPU.
+	 */
+	if (!init && !mlxsw_sp_port->bridged)
+		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+	for (vid = vid_begin; vid <= vid_end;
+	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+			    vid_end);
+		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+					     false);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+				   vid, vid_e);
+			return err;
+		}
+	}
+
+	if ((mlxsw_sp_port->pvid >= vid_begin) &&
+	    (mlxsw_sp_port->pvid <= vid_end)) {
+		/* Default VLAN is always 1 */
+		mlxsw_sp_port->pvid = 1;
+		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+					     mlxsw_sp_port->pvid);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+				   vid);
+			return err;
+		}
+	}
+
+	if (init)
+		goto out;
+
+	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+					false, false);
+	if (err) {
+		netdev_err(dev, "Failed to clear flooding\n");
+		return err;
+	}
+
+	for (vid = vid_begin; vid <= vid_end; vid++) {
+		/* Remove FID mapping in case of Virtual mode */
+		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(dev, "Failed to unmap FID=%d", vid);
+			return err;
+		}
+	}
+
+out:
+	/* Change the activity bits only if the HW operation succeeded */
+	for (vid = vid_begin; vid <= vid_end; vid++)
+		clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+	return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+				   const struct switchdev_obj_port_vlan *vlan)
+{
+	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+					 vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+			     const struct switchdev_obj_port_fdb *fdb)
+{
+	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+				    false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+				 const struct switchdev_obj *obj)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj));
+		break;
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+						   SWITCHDEV_OBJ_PORT_FDB(obj));
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+				  struct switchdev_obj_port_fdb *fdb,
+				  switchdev_obj_dump_cb_t *cb)
+{
+	char *sfd_pl;
+	char mac[ETH_ALEN];
+	u16 vid;
+	u8 local_port;
+	u8 num_rec;
+	int stored_err = 0;
+	int i;
+	int err;
+
+	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+	if (!sfd_pl)
+		return -ENOMEM;
+
+	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
+	do {
+		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+				      MLXSW_REG(sfd), sfd_pl);
+		if (err)
+			goto out;
+
+		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+		/* Even in case of error, we have to run the dump to the end
+		 * so the session in firmware is finished.
+		 */
+		if (stored_err)
+			continue;
+
+		for (i = 0; i < num_rec; i++) {
+			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
+			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
+				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
+							&local_port);
+				if (local_port == mlxsw_sp_port->local_port) {
+					ether_addr_copy(fdb->addr, mac);
+					fdb->ndm_state = NUD_REACHABLE;
+					fdb->vid = vid;
+					err = cb(&fdb->obj);
+					if (err)
+						stored_err = err;
+				}
+			}
+		}
+	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
+
+out:
+	kfree(sfd_pl);
+	return stored_err ? stored_err : err;
+}
+
+static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+				   struct switchdev_obj_port_vlan *vlan,
+				   switchdev_obj_dump_cb_t *cb)
+{
+	u16 vid;
+	int err = 0;
+
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
+		vlan->flags = 0;
+		if (vid == mlxsw_sp_port->pvid)
+			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+		vlan->vid_begin = vid;
+		vlan->vid_end = vid;
+		err = cb(&vlan->obj);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static int mlxsw_sp_port_obj_dump(struct net_device *dev,
+				  struct switchdev_obj *obj,
+				  switchdev_obj_dump_cb_t *cb)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
+		break;
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
+					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
+	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
+	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
+	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
+	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
+	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
+};
+
+static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+					    char *sfn_pl, int rec_index,
+					    bool adding)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	char mac[ETH_ALEN];
+	u8 local_port;
+	u16 vid;
+	int err;
+
+	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
+	mlxsw_sp_port = mlxsw_sp->ports[local_port];
+	if (!mlxsw_sp_port) {
+		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
+		return;
+	}
+
+	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
+				   adding && mlxsw_sp_port->learning, true);
+	if (err) {
+		if (net_ratelimit())
+			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
+		return;
+	}
+
+	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
+		struct switchdev_notifier_fdb_info info;
+		unsigned long notifier_type;
+
+		info.addr = mac;
+		info.vid = vid;
+		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
+		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
+					 &info.info);
+	}
+}
+
+static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
+					    char *sfn_pl, int rec_index)
+{
+	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
+	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
+		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+						rec_index, true);
+		break;
+	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
+		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
+						rec_index, false);
+		break;
+	}
+}
+
+static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
+			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+}
+
+static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
+{
+	struct mlxsw_sp *mlxsw_sp;
+	char *sfn_pl;
+	u8 num_rec;
+	int i;
+	int err;
+
+	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
+	if (!sfn_pl)
+		return;
+
+	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
+
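+	/* Drain all pending FDB notification records from the device,
+	 * then re-arm the delayed work for the next polling interval.
+	 */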
+	do {
+		mlxsw_reg_sfn_pack(sfn_pl);
+		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
+		if (err) {
+			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
+			break;
+		}
+		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
+		for (i = 0; i < num_rec; i++)
+			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
+
+	} while (num_rec);
+
+	kfree(sfn_pl);
+	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+}
+
+static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
+
+	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
+		return err;
+	}
+	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
+	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
+	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
+	return 0;
+}
+
+static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
+}
+
+static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	u16 fid;
+
+	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
+		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
+}
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
+{
+	return mlxsw_sp_fdb_init(mlxsw_sp);
+}
+
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	mlxsw_sp_fdb_fini(mlxsw_sp);
+	mlxsw_sp_fids_fini(mlxsw_sp);
+}
+
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct net_device *dev = mlxsw_sp_port->dev;
+	int err;
+
+	/* Allow only untagged packets to ingress and tag them internally
+	 * with VID 1.
+	 */
+	mlxsw_sp_port->pvid = 1;
+	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
+	if (err) {
+		netdev_err(dev, "Unable to init VLANs\n");
+		return err;
+	}
+
+	/* Add implicit VLAN interface in the device, so that untagged
+	 * packets will be classified to the default vFID.
+	 */
+	err = mlxsw_sp_port_add_vid(dev, 0, 1);
+	if (err)
+		netdev_err(dev, "Failed to configure default vFID\n");
+
+	return err;
+}
+
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
+}
+
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 3e52ee9..d85960c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -57,13 +57,11 @@
 
 struct mlxsw_sx_port;
 
-#define MLXSW_SW_HW_ID_LEN 6
-
 struct mlxsw_sx {
 	struct mlxsw_sx_port **ports;
 	struct mlxsw_core *core;
 	const struct mlxsw_bus_info *bus_info;
-	u8 hw_id[MLXSW_SW_HW_ID_LEN];
+	u8 hw_id[ETH_ALEN];
 };
 
 struct mlxsw_sx_port_pcpu_stats {
@@ -868,7 +866,7 @@
 	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
 		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
 		break;
@@ -925,7 +923,8 @@
 	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
 	if (!spms_pl)
 		return -ENOMEM;
-	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
+	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
 	kfree(spms_pl);
 	return err;
@@ -1069,9 +1068,9 @@
 	return 0;
 
 err_register_netdev:
-err_port_admin_status_set:
 err_port_mac_learning_mode_set:
 err_port_stp_state_set:
+err_port_admin_status_set:
 err_port_mtu_set:
 err_port_speed_set:
 err_port_swid_set:
@@ -1148,7 +1147,7 @@
 	}
 
 	status = mlxsw_reg_pude_oper_status_get(pude_pl);
-	if (MLXSW_PORT_OPER_STATUS_UP == status) {
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
 		netdev_info(mlxsw_sx_port->dev, "link up\n");
 		netif_carrier_on(mlxsw_sx_port->dev);
 	} else {
@@ -1178,8 +1177,7 @@
 	if (err)
 		return err;
 
-	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 	if (err)
 		goto err_event_trap_set;
@@ -1212,9 +1210,8 @@
 	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
 
 	if (unlikely(!mlxsw_sx_port)) {
-		if (net_ratelimit())
-			dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
-				 local_port);
+		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+				     local_port);
 		return;
 	}
 
@@ -1316,6 +1313,11 @@
 	if (err)
 		return err;
 
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
 		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
 						      &mlxsw_sx_rx_listener[i],
@@ -1324,7 +1326,6 @@
 			goto err_rx_listener_register;
 
 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
-				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
 				    mlxsw_sx_rx_listener[i].trap_id);
 		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 		if (err)
@@ -1339,7 +1340,6 @@
 err_rx_listener_register:
 	for (i--; i >= 0; i--) {
 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
 				    mlxsw_sx_rx_listener[i].trap_id);
 		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 
@@ -1357,7 +1357,6 @@
 
 	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
 		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
-				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
 				    mlxsw_sx_rx_listener[i].trap_id);
 		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
 
@@ -1371,25 +1370,15 @@
 {
 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
 	char sgcr_pl[MLXSW_REG_SGCR_LEN];
-	char *smid_pl;
 	char *sftr_pl;
 	int err;
 
-	/* Due to FW bug, we must configure SMID. */
-	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
-	if (!smid_pl)
-		return -ENOMEM;
-	mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
-	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
-	kfree(smid_pl);
-	if (err)
-		return err;
-
 	/* Configure a flooding table, which includes only CPU port. */
 	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
 	if (!sftr_pl)
 		return -ENOMEM;
-	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
+			    MLXSW_PORT_CPU_PORT, true);
 	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
 	kfree(sftr_pl);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
index 06fc46c..fdf9472 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -38,6 +38,7 @@
 
 #define MLXSW_TXHDR_LEN 0x10
 #define MLXSW_TXHDR_VERSION_0 0
+#define MLXSW_TXHDR_VERSION_1 1
 
 enum {
 	MLXSW_TXHDR_ETH_CTL,
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 3fd8ca6..36a09d9 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -33,4 +33,13 @@
 	  Enable the verify after the buffer write useful for debugging purpose.
 	  If unsure, say N.
 
+config ENCX24J600
+	tristate "ENCX24J600 support"
+	depends on SPI
+	---help---
+	  Support for the Microchip ENC424J600/624J600 ethernet chip.
+
+	  To compile this driver as a module, choose M here. The module will be
+	  called encx24j600.
+
 endif # NET_VENDOR_MICROCHIP
diff --git a/drivers/net/ethernet/microchip/Makefile b/drivers/net/ethernet/microchip/Makefile
index 573d429..ff78f62 100644
--- a/drivers/net/ethernet/microchip/Makefile
+++ b/drivers/net/ethernet/microchip/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_ENCX24J600) += encx24j600.o encx24j600-regmap.o
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
new file mode 100644
index 0000000..f3bb905
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -0,0 +1,513 @@
+/**
+ * Register map access API - ENCX24J600 support
+ *
+ * Copyright 2015 Gridpoint
+ *
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+static inline bool is_bits_set(int value, int mask)
+{
+	return (value & mask) == mask;
+}
+
+static int encx24j600_switch_bank(struct encx24j600_context *ctx,
+				  int bank)
+{
+	int ret = 0;
+	u8 bank_opcode = BANK_SELECT(bank);
+
+	ret = spi_write(ctx->spi, &bank_opcode, 1);
+	if (ret == 0)
+		ctx->bank = bank;
+
+	return ret;
+}
+
+static int encx24j600_cmdn(struct encx24j600_context *ctx, u8 opcode,
+			    const void *buf, size_t len)
+{
+	struct spi_message m;
+	struct spi_transfer t[2] = { { .tx_buf = &opcode, .len = 1, },
+				     { .tx_buf = buf, .len = len }, };
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+	spi_message_add_tail(&t[1], &m);
+
+	return spi_sync(ctx->spi, &m);
+}
+
+static void regmap_lock_mutex(void *context)
+{
+	struct encx24j600_context *ctx = context;
+	mutex_lock(&ctx->mutex);
+}
+
+static void regmap_unlock_mutex(void *context)
+{
+	struct encx24j600_context *ctx = context;
+	mutex_unlock(&ctx->mutex);
+}
+
+static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
+				      size_t len)
+{
+	struct encx24j600_context *ctx = context;
+	u8 banked_reg = reg & ADDR_MASK;
+	u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+	u8 cmd = RCRU;
+	int ret = 0;
+	int i = 0;
+	u8 tx_buf[2];
+
+	if (reg < 0x80) {
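+		/* Offsets 0x16..0x1f (EUDAST..ECON1) are mirrored into
+		 * every bank, so a bank switch is only needed for banked
+		 * offsets below 0x16.
+		 */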
+		cmd = RCRCODE | banked_reg;
+		if ((banked_reg < 0x16) && (ctx->bank != bank))
+			ret = encx24j600_switch_bank(ctx, bank);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		/* Translate registers that are more efficient to access
+		 * using 3-byte SPI commands
+		 */
+		switch (reg) {
+		case EGPRDPT:
+			cmd = RGPRDPT; break;
+		case EGPWRPT:
+			cmd = RGPWRPT; break;
+		case ERXRDPT:
+			cmd = RRXRDPT; break;
+		case ERXWRPT:
+			cmd = RRXWRPT; break;
+		case EUDARDPT:
+			cmd = RUDARDPT; break;
+		case EUDAWRPT:
+			cmd = RUDAWRPT; break;
+		case EGPDATA:
+		case ERXDATA:
+		case EUDADATA:
+		default:
+			return -EINVAL;
+		}
+	}
+
+	tx_buf[i++] = cmd;
+	if (cmd == RCRU)
+		tx_buf[i++] = reg;
+
+	ret = spi_write_then_read(ctx->spi, tx_buf, i, val, len);
+
+	return ret;
+}
+
+static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
+					u8 reg, u8 *val, size_t len,
+					u8 unbanked_cmd, u8 banked_code)
+{
+	u8 banked_reg = reg & ADDR_MASK;
+	u8 bank = ((reg & BANK_MASK) >> BANK_SHIFT);
+	u8 cmd = unbanked_cmd;
+	struct spi_message m;
+	struct spi_transfer t[3] = { { .tx_buf = &cmd, .len = sizeof(cmd), },
+				     { .tx_buf = &reg, .len = sizeof(reg), },
+				     { .tx_buf = val, .len = len }, };
+
+	if (reg < 0x80) {
+		int ret = 0;
+		cmd = banked_code | banked_reg;
+		if ((banked_reg < 0x16) && (ctx->bank != bank))
+			ret = encx24j600_switch_bank(ctx, bank);
+		if (unlikely(ret))
+			return ret;
+	} else {
+		/* Translate registers that are more efficient to access
+		 * using 3-byte SPI commands
+		 */
+		switch (reg) {
+		case EGPRDPT:
+			cmd = WGPRDPT; break;
+		case EGPWRPT:
+			cmd = WGPWRPT; break;
+		case ERXRDPT:
+			cmd = WRXRDPT; break;
+		case ERXWRPT:
+			cmd = WRXWRPT; break;
+		case EUDARDPT:
+			cmd = WUDARDPT; break;
+		case EUDAWRPT:
+			cmd = WUDAWRPT; break;
+		case EGPDATA:
+		case ERXDATA:
+		case EUDADATA:
+		default:
+			return -EINVAL;
+		}
+	}
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t[0], &m);
+
+	if (cmd == unbanked_cmd) {
+		t[1].tx_buf = &reg;
+		spi_message_add_tail(&t[1], &m);
+	}
+
+	spi_message_add_tail(&t[2], &m);
+	return spi_sync(ctx->spi, &m);
+}
+
+static int regmap_encx24j600_sfr_write(void *context, u8 reg, u8 *val,
+				       size_t len)
+{
+	struct encx24j600_context *ctx = context;
+	return regmap_encx24j600_sfr_update(ctx, reg, val, len, WCRU, WCRCODE);
+}
+
+static int regmap_encx24j600_sfr_set_bits(struct encx24j600_context *ctx,
+					  u8 reg, u8 val)
+{
+	return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFSU, BFSCODE);
+}
+
+static int regmap_encx24j600_sfr_clr_bits(struct encx24j600_context *ctx,
+					  u8 reg, u8 val)
+{
+	return regmap_encx24j600_sfr_update(ctx, reg, &val, 1, BFCU, BFCCODE);
+}
+
+static int regmap_encx24j600_reg_update_bits(void *context, unsigned int reg,
+					     unsigned int mask,
+					     unsigned int val)
+{
+	struct encx24j600_context *ctx = context;
+
+	int ret = 0;
+	unsigned int set_mask = mask & val;
+	unsigned int clr_mask = mask & ~val;
+
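+	/* Only the ETH-type SFRs accept the bit-field set/clear commands,
+	 * so reject the MAC/MII range (0x40..0x6b) and the unbanked space.
+	 * BFSU/BFCU act on one 8-bit half of a register at a time, hence
+	 * the 16-bit mask is applied as up to four single-byte operations.
+	 */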
+	if ((reg >= 0x40 && reg < 0x6c) || reg >= 0x80)
+		return -EINVAL;
+
+	if (set_mask & 0xff)
+		ret = regmap_encx24j600_sfr_set_bits(ctx, reg, set_mask);
+
+	set_mask = (set_mask & 0xff00) >> 8;
+
+	if ((set_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_set_bits(ctx, reg + 1, set_mask);
+
+	if ((clr_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_clr_bits(ctx, reg, clr_mask);
+
+	clr_mask = (clr_mask & 0xff00) >> 8;
+
+	if ((clr_mask & 0xff) && (ret == 0))
+		ret = regmap_encx24j600_sfr_clr_bits(ctx, reg + 1, clr_mask);
+
+	return ret;
+}
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+				size_t count)
+{
+	struct encx24j600_context *ctx = context;
+
+	if (reg < 0xc0)
+		return encx24j600_cmdn(ctx, reg, data, count);
+	else
+		/* SPI 1-byte command. Ignore data */
+		return spi_write(ctx->spi, &reg, 1);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_write);
+
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count)
+{
+	struct encx24j600_context *ctx = context;
+
+	if (reg == RBSEL && count > 1)
+		count = 1;
+
+	return spi_write_then_read(ctx->spi, &reg, sizeof(reg), data, count);
+}
+EXPORT_SYMBOL_GPL(regmap_encx24j600_spi_read);
+
+static int regmap_encx24j600_write(void *context, const void *data,
+				   size_t len)
+{
+	u8 *dout = (u8 *)data;
+	u8 reg = dout[0];
+	++dout;
+	--len;
+
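+	/* Values above 0xa0 are not SFR addresses but SPI instruction
+	 * opcodes (e.g. the single-byte commands at 0xc0..0xee); pass
+	 * those to the raw SPI path. Everything else is an SFR write of
+	 * at most one 16-bit value.
+	 */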
+	if (reg > 0xa0)
+		return regmap_encx24j600_spi_write(context, reg, dout, len);
+
+	if (len > 2)
+		return -EINVAL;
+
+	return regmap_encx24j600_sfr_write(context, reg, dout, len);
+}
+
+static int regmap_encx24j600_read(void *context,
+				  const void *reg_buf, size_t reg_size,
+				  void *val, size_t val_size)
+{
+	u8 reg = *(const u8 *)reg_buf;
+
+	if (reg_size != 1) {
+		pr_err("%s: reg=%02x reg_size=%zu\n", __func__, reg, reg_size);
+		return -EINVAL;
+	}
+
+	if (reg > 0xa0)
+		return regmap_encx24j600_spi_read(context, reg, val, val_size);
+
+	if (val_size > 2) {
+		pr_err("%s: reg=%02x val_size=%zu\n", __func__, reg, val_size);
+		return -EINVAL;
+	}
+
+	return regmap_encx24j600_sfr_read(context, reg, val, val_size);
+}
+
+static bool encx24j600_regmap_readable(struct device *dev, unsigned int reg)
+{
+	if ((reg < 0x36) ||
+	    ((reg >= 0x40) && (reg < 0x4c)) ||
+	    ((reg >= 0x52) && (reg < 0x56)) ||
+	    ((reg >= 0x60) && (reg < 0x66)) ||
+	    ((reg >= 0x68) && (reg < 0x80)) ||
+	    ((reg >= 0x86) && (reg < 0x92)) ||
+	    (reg == 0xc8))
+		return true;
+	else
+		return false;
+}
+
+static bool encx24j600_regmap_writeable(struct device *dev, unsigned int reg)
+{
+	if ((reg < 0x12) ||
+	    ((reg >= 0x14) && (reg < 0x1a)) ||
+	    ((reg >= 0x1c) && (reg < 0x36)) ||
+	    ((reg >= 0x40) && (reg < 0x4c)) ||
+	    ((reg >= 0x52) && (reg < 0x56)) ||
+	    ((reg >= 0x60) && (reg < 0x68)) ||
+	    ((reg >= 0x6c) && (reg < 0x80)) ||
+	    ((reg >= 0x86) && (reg < 0x92)) ||
+	    ((reg >= 0xc0) && (reg < 0xc8)) ||
+	    ((reg >= 0xca) && (reg < 0xf0)))
+		return true;
+	else
+		return false;
+}
+
+static bool encx24j600_regmap_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ERXHEAD:
+	case EDMACS:
+	case ETXSTAT:
+	case ETXWIRE:
+	case ECON1:	/* Can be modified via single byte cmds */
+	case ECON2:	/* Can be modified via single byte cmds */
+	case ESTAT:
+	case EIR:	/* Can be modified via single byte cmds */
+	case MIRD:
+	case MISTAT:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+static bool encx24j600_regmap_precious(struct device *dev, unsigned int reg)
+{
+	/* single byte cmds are precious */
+	if (((reg >= 0xc0) && (reg < 0xc8)) ||
+	    ((reg >= 0xca) && (reg < 0xf0)))
+		return true;
+	else
+		return false;
+}
+
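+/* The PHY registers are reached through an indirect MII interface:
+ * write the address to MIREGADR, kick MICMD/MIWR, wait ~26us for the
+ * operation to start, then poll MISTAT.BUSY before collecting the
+ * result from MIRD.
+ */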
+static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
+					  unsigned int *val)
+{
+	struct encx24j600_context *ctx = context;
+	int ret;
+	unsigned int mistat;
+
+	reg = MIREGADR_VAL | (reg & PHREG_MASK);
+	ret = regmap_write(ctx->regmap, MIREGADR, reg);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MICMD, MIIRD);
+	if (unlikely(ret))
+		goto err_out;
+
+	usleep_range(26, 100);
+	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+	       (mistat & BUSY))
+		cpu_relax();
+
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MICMD, 0);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_read(ctx->regmap, MIRD, val);
+
+err_out:
+	if (ret)
+		pr_err("%s: error %d reading reg %02x\n", __func__, ret,
+		       reg & PHREG_MASK);
+
+	return ret;
+}
+
+static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
+					   unsigned int val)
+{
+	struct encx24j600_context *ctx = context;
+	int ret;
+	unsigned int mistat;
+
+	reg = MIREGADR_VAL | (reg & PHREG_MASK);
+	ret = regmap_write(ctx->regmap, MIREGADR, reg);
+	if (unlikely(ret))
+		goto err_out;
+
+	ret = regmap_write(ctx->regmap, MIWR, val);
+	if (unlikely(ret))
+		goto err_out;
+
+	usleep_range(26, 100);
+	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
+	       (mistat & BUSY))
+		cpu_relax();
+
+err_out:
+	if (ret)
+		pr_err("%s: error %d writing reg %02x=%04x\n", __func__, ret,
+		       reg & PHREG_MASK, val);
+
+	return ret;
+}
+
+static bool encx24j600_phymap_readable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHCON1:
+	case PHSTAT1:
+	case PHANA:
+	case PHANLPA:
+	case PHANE:
+	case PHCON2:
+	case PHSTAT2:
+	case PHSTAT3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool encx24j600_phymap_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHCON1:
+	case PHCON2:
+	case PHANA:
+		return true;
+	case PHSTAT1:
+	case PHSTAT2:
+	case PHSTAT3:
+	case PHANLPA:
+	case PHANE:
+	default:
+		return false;
+	}
+}
+
+static bool encx24j600_phymap_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PHSTAT1:
+	case PHSTAT2:
+	case PHSTAT3:
+	case PHANLPA:
+	case PHANE:
+	case PHCON2:
+		return true;
+	default:
+		return false;
+	}
+}
+
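+/* Main SFR map: 16-bit registers at even addresses (hence reg_stride 2),
+ * routed through the custom read/write callbacks above so that banked,
+ * unbanked and single-byte command forms share one regmap.
+ */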
+static struct regmap_config regcfg = {
+	.name = "reg",
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = 0xee,
+	.reg_stride = 2,
+	.cache_type = REGCACHE_RBTREE,
+	.val_format_endian = REGMAP_ENDIAN_LITTLE,
+	.readable_reg = encx24j600_regmap_readable,
+	.writeable_reg = encx24j600_regmap_writeable,
+	.volatile_reg = encx24j600_regmap_volatile,
+	.precious_reg = encx24j600_regmap_precious,
+	.lock = regmap_lock_mutex,
+	.unlock = regmap_unlock_mutex,
+};
+
+static struct regmap_bus regmap_encx24j600 = {
+	.write = regmap_encx24j600_write,
+	.read = regmap_encx24j600_read,
+	.reg_update_bits = regmap_encx24j600_reg_update_bits,
+};
+
+static struct regmap_config phycfg = {
+	.name = "phy",
+	.reg_bits = 8,
+	.val_bits = 16,
+	.max_register = 0x1f,
+	.cache_type = REGCACHE_RBTREE,
+	.val_format_endian = REGMAP_ENDIAN_LITTLE,
+	.readable_reg = encx24j600_phymap_readable,
+	.writeable_reg = encx24j600_phymap_writeable,
+	.volatile_reg = encx24j600_phymap_volatile,
+};
+static struct regmap_bus phymap_encx24j600 = {
+	.reg_write = regmap_encx24j600_phy_reg_write,
+	.reg_read = regmap_encx24j600_phy_reg_read,
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+				 struct encx24j600_context *ctx)
+{
+	mutex_init(&ctx->mutex);
+	regcfg.lock_arg = ctx;
+	ctx->regmap = devm_regmap_init(dev, &regmap_encx24j600, ctx, &regcfg);
+	ctx->phymap = devm_regmap_init(dev, &phymap_encx24j600, ctx, &phycfg);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_encx24j600);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
new file mode 100644
index 0000000..bf08ce2
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -0,0 +1,1127 @@
+/**
+ * Microchip ENCX24J600 ethernet driver
+ *
+ * Copyright (C) 2015 Gridpoint
+ * Author: Jon Ringle <jringle@gridpoint.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+#include "encx24j600_hw.h"
+
+#define DRV_NAME	"encx24j600"
+#define DRV_VERSION	"1.0"
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+/* SRAM memory layout:
+ *
+ * 0x0000-0x05ff TX buffers  1.5KB  (1*1536) reside in the GP area in SRAM
+ * 0x0600-0x5fff RX buffers 22.5KB (15*1536) reside in the RX area in SRAM
+ */
+#define ENC_TX_BUF_START 0x0000U
+#define ENC_RX_BUF_START 0x0600U
+#define ENC_RX_BUF_END   0x5fffU
+#define ENC_SRAM_SIZE    0x6000U
+
+enum {
+	RXFILTER_NORMAL,
+	RXFILTER_MULTI,
+	RXFILTER_PROMISC
+};
+
+struct encx24j600_priv {
+	struct net_device        *ndev;
+	struct mutex              lock; /* device access lock */
+	struct encx24j600_context ctx;
+	struct sk_buff           *tx_skb;
+	struct task_struct       *kworker_task;
+	struct kthread_worker     kworker;
+	struct kthread_work       tx_work;
+	struct kthread_work       setrx_work;
+	u16                       next_packet;
+	bool                      hw_enabled;
+	bool                      full_duplex;
+	bool                      autoneg;
+	u16                       speed;
+	int                       rxfilter;
+	u32                       msg_enable;
+};
+
+static void dump_packet(const char *msg, int len, const char *data)
+{
+	pr_debug(DRV_NAME ": %s - packet len:%d\n", msg, len);
+	print_hex_dump_bytes("pk data: ", DUMP_PREFIX_OFFSET, data, len);
+}
+
+static void encx24j600_dump_rsv(struct encx24j600_priv *priv, const char *msg,
+				struct rsv *rsv)
+{
+	struct net_device *dev = priv->ndev;
+
+	netdev_info(dev, "RX packet Len:%d\n", rsv->len);
+	netdev_dbg(dev, "%s - NextPk: 0x%04x\n", msg,
+		   rsv->next_packet);
+	netdev_dbg(dev, "RxOK: %d, DribbleNibble: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXOK),
+		   RSV_GETBIT(rsv->rxstat, RSV_DRIBBLENIBBLE));
+	netdev_dbg(dev, "CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_CRCERROR),
+		   RSV_GETBIT(rsv->rxstat, RSV_LENCHECKERR),
+		   RSV_GETBIT(rsv->rxstat, RSV_LENOUTOFRANGE));
+	netdev_dbg(dev, "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXMULTICAST),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXBROADCAST),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXLONGEVDROPEV),
+		   RSV_GETBIT(rsv->rxstat, RSV_CARRIEREV));
+	netdev_dbg(dev, "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+		   RSV_GETBIT(rsv->rxstat, RSV_RXCONTROLFRAME),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXPAUSEFRAME),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXUNKNOWNOPCODE),
+		   RSV_GETBIT(rsv->rxstat, RSV_RXTYPEVLAN));
+}
+
+static u16 encx24j600_read_reg(struct encx24j600_priv *priv, u8 reg)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned int val = 0;
+	int ret = regmap_read(priv->ctx.regmap, reg, &val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d reading reg %02x\n",
+			  __func__, ret, reg);
+	return val;
+}
+
+static void encx24j600_write_reg(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.regmap, reg, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+			  __func__, ret, reg, val);
+}
+
+static void encx24j600_update_reg(struct encx24j600_priv *priv, u8 reg,
+				  u16 mask, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_update_bits(priv->ctx.regmap, reg, mask, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d updating reg %02x=%04x~%04x\n",
+			  __func__, ret, reg, val, mask);
+}
+
+static u16 encx24j600_read_phy(struct encx24j600_priv *priv, u8 reg)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned int val = 0;
+	int ret = regmap_read(priv->ctx.phymap, reg, &val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d reading %02x\n",
+			  __func__, ret, reg);
+	return val;
+}
+
+static void encx24j600_write_phy(struct encx24j600_priv *priv, u8 reg, u16 val)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.phymap, reg, val);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d writing reg %02x=%04x\n",
+			  __func__, ret, reg, val);
+}
+
+static void encx24j600_clr_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+	encx24j600_update_reg(priv, reg, mask, 0);
+}
+
+static void encx24j600_set_bits(struct encx24j600_priv *priv, u8 reg, u16 mask)
+{
+	encx24j600_update_reg(priv, reg, mask, mask);
+}
+
+static void encx24j600_cmd(struct encx24j600_priv *priv, u8 cmd)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = regmap_write(priv->ctx.regmap, cmd, 0);
+	if (unlikely(ret))
+		netif_err(priv, drv, dev, "%s: error %d with cmd %02x\n",
+			  __func__, ret, cmd);
+}
+
+static int encx24j600_raw_read(struct encx24j600_priv *priv, u8 reg, u8 *data,
+			       size_t count)
+{
+	int ret;
+	mutex_lock(&priv->ctx.mutex);
+	ret = regmap_encx24j600_spi_read(&priv->ctx, reg, data, count);
+	mutex_unlock(&priv->ctx.mutex);
+
+	return ret;
+}
+
+static int encx24j600_raw_write(struct encx24j600_priv *priv, u8 reg,
+				const u8 *data, size_t count)
+{
+	int ret;
+	mutex_lock(&priv->ctx.mutex);
+	ret = regmap_encx24j600_spi_write(&priv->ctx, reg, data, count);
+	mutex_unlock(&priv->ctx.mutex);
+
+	return ret;
+}
+
+static void encx24j600_update_phcon1(struct encx24j600_priv *priv)
+{
+	u16 phcon1 = encx24j600_read_phy(priv, PHCON1);
+	if (priv->autoneg == AUTONEG_ENABLE) {
+		phcon1 |= ANEN | RENEG;
+	} else {
+		phcon1 &= ~ANEN;
+		if (priv->speed == SPEED_100)
+			phcon1 |= SPD100;
+		else
+			phcon1 &= ~SPD100;
+
+		if (priv->full_duplex)
+			phcon1 |= PFULDPX;
+		else
+			phcon1 &= ~PFULDPX;
+	}
+	encx24j600_write_phy(priv, PHCON1, phcon1);
+}
+
+/* Waits for autonegotiation to complete. */
+static int encx24j600_wait_for_autoneg(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	unsigned long timeout = jiffies + msecs_to_jiffies(2000);
+	u16 phstat1;
+	u16 estat;
+	int ret = 0;
+
+	phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+	while ((phstat1 & ANDONE) == 0) {
+		if (time_after(jiffies, timeout)) {
+			u16 phstat3;
+
+			netif_notice(priv, drv, dev, "timeout waiting for autoneg done\n");
+
+			priv->autoneg = AUTONEG_DISABLE;
+			phstat3 = encx24j600_read_phy(priv, PHSTAT3);
+			priv->speed = (phstat3 & PHY3SPD100)
+				      ? SPEED_100 : SPEED_10;
+			priv->full_duplex = (phstat3 & PHY3DPX) ? 1 : 0;
+			encx24j600_update_phcon1(priv);
+			netif_notice(priv, drv, dev, "Using parallel detection: %s/%s",
+				     priv->speed == SPEED_100 ? "100" : "10",
+				     priv->full_duplex ? "Full" : "Half");
+
+			return -ETIMEDOUT;
+		}
+		cpu_relax();
+		phstat1 = encx24j600_read_phy(priv, PHSTAT1);
+	}
+
+	estat = encx24j600_read_reg(priv, ESTAT);
+	if (estat & PHYDPX) {
+		encx24j600_set_bits(priv, MACON2, FULDPX);
+		encx24j600_write_reg(priv, MABBIPG, 0x15);
+	} else {
+		encx24j600_clr_bits(priv, MACON2, FULDPX);
+		encx24j600_write_reg(priv, MABBIPG, 0x12);
+		/* Maximum retransmission attempts */
+		encx24j600_write_reg(priv, MACLCON, 0x370f);
+	}
+
+	return ret;
+}
+
+/* Access the PHY to determine link status */
+static void encx24j600_check_link_status(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	u16 estat;
+
+	estat = encx24j600_read_reg(priv, ESTAT);
+
+	if (estat & PHYLNK) {
+		if (priv->autoneg == AUTONEG_ENABLE)
+			encx24j600_wait_for_autoneg(priv);
+
+		netif_carrier_on(dev);
+		netif_info(priv, ifup, dev, "link up\n");
+	} else {
+		netif_info(priv, ifdown, dev, "link down\n");
+
+		/* Re-enable autoneg since we won't know what we might be
+		 * connected to when the link is brought back up again.
+		 */
+		priv->autoneg  = AUTONEG_ENABLE;
+		priv->full_duplex = true;
+		priv->speed = SPEED_100;
+		netif_carrier_off(dev);
+	}
+}
+
+static void encx24j600_int_link_handler(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+
+	netif_dbg(priv, intr, dev, "%s\n", __func__);
+	encx24j600_check_link_status(priv);
+	encx24j600_clr_bits(priv, EIR, LINKIF);
+}
+
+static void encx24j600_tx_complete(struct encx24j600_priv *priv, bool err)
+{
+	struct net_device *dev = priv->ndev;
+
+	if (!priv->tx_skb) {
+		BUG();
+		return;
+	}
+
+	mutex_lock(&priv->lock);
+
+	if (err)
+		dev->stats.tx_errors++;
+	else
+		dev->stats.tx_packets++;
+
+	dev->stats.tx_bytes += priv->tx_skb->len;
+
+	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+	netif_dbg(priv, tx_done, dev, "TX Done%s\n", err ? ": Err" : "");
+
+	dev_kfree_skb(priv->tx_skb);
+	priv->tx_skb = NULL;
+
+	netif_wake_queue(dev);
+
+	mutex_unlock(&priv->lock);
+}
+
+static int encx24j600_receive_packet(struct encx24j600_priv *priv,
+				     struct rsv *rsv)
+{
+	struct net_device *dev = priv->ndev;
+	struct sk_buff *skb = netdev_alloc_skb(dev, rsv->len + NET_IP_ALIGN);
+	if (!skb) {
+		pr_err_ratelimited("RX: OOM: packet dropped\n");
+		dev->stats.rx_dropped++;
+		return -ENOMEM;
+	}
+	skb_reserve(skb, NET_IP_ALIGN);
+	encx24j600_raw_read(priv, RRXDATA, skb_put(skb, rsv->len), rsv->len);
+
+	if (netif_msg_pktdata(priv))
+		dump_packet("RX", skb->len, skb->data);
+
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->ip_summed = CHECKSUM_COMPLETE;
+
+	/* Maintain stats */
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += rsv->len;
+	priv->next_packet = rsv->next_packet;
+
+	netif_rx(skb);
+
+	return 0;
+}
+
+static void encx24j600_rx_packets(struct encx24j600_priv *priv, u8 packet_count)
+{
+	struct net_device *dev = priv->ndev;
+
+	while (packet_count--) {
+		struct rsv rsv;
+		u16 newrxtail;
+
+		encx24j600_write_reg(priv, ERXRDPT, priv->next_packet);
+		encx24j600_raw_read(priv, RRXDATA, (u8 *)&rsv, sizeof(rsv));
+
+		if (netif_msg_rx_status(priv))
+			encx24j600_dump_rsv(priv, __func__, &rsv);
+
+		if (!RSV_GETBIT(rsv.rxstat, RSV_RXOK) ||
+		    (rsv.len > MAX_FRAMELEN)) {
+			netif_err(priv, rx_err, dev, "RX Error %04x\n",
+				  rsv.rxstat);
+			dev->stats.rx_errors++;
+
+			if (RSV_GETBIT(rsv.rxstat, RSV_CRCERROR))
+				dev->stats.rx_crc_errors++;
+			if (RSV_GETBIT(rsv.rxstat, RSV_LENCHECKERR))
+				dev->stats.rx_frame_errors++;
+			if (rsv.len > MAX_FRAMELEN)
+				dev->stats.rx_over_errors++;
+		} else {
+			encx24j600_receive_packet(priv, &rsv);
+		}
+
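+		/* The RX tail must trail the next-packet pointer by two
+		 * bytes; when it would land on the start of the RX area,
+		 * wrap it to the last even word of the SRAM instead.
+		 */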
+		newrxtail = priv->next_packet - 2;
+		if (newrxtail == ENC_RX_BUF_START)
+			newrxtail = SRAM_SIZE - 2;
+
+		encx24j600_cmd(priv, SETPKTDEC);
+		encx24j600_write_reg(priv, ERXTAIL, newrxtail);
+	}
+}
+
+static irqreturn_t encx24j600_isr(int irq, void *dev_id)
+{
+	struct encx24j600_priv *priv = dev_id;
+	struct net_device *dev = priv->ndev;
+	int eir;
+
+	/* Clear interrupts */
+	encx24j600_cmd(priv, CLREIE);
+
+	eir = encx24j600_read_reg(priv, EIR);
+
+	if (eir & LINKIF)
+		encx24j600_int_link_handler(priv);
+
+	if (eir & TXIF)
+		encx24j600_tx_complete(priv, false);
+
+	if (eir & TXABTIF)
+		encx24j600_tx_complete(priv, true);
+
+	if (eir & RXABTIF) {
+		if (eir & PCFULIF) {
+			/* Packet counter is full */
+			netif_err(priv, rx_err, dev, "Packet counter full\n");
+		}
+		dev->stats.rx_dropped++;
+		encx24j600_clr_bits(priv, EIR, RXABTIF);
+	}
+
+	if (eir & PKTIF) {
+		u8 packet_count;
+
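+		/* The low byte of ESTAT counts pending RX packets; each
+		 * SETPKTDEC in encx24j600_rx_packets() decrements it, so
+		 * keep re-reading until it drains to zero.
+		 */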
+		mutex_lock(&priv->lock);
+
+		packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+		while (packet_count) {
+			encx24j600_rx_packets(priv, packet_count);
+			packet_count = encx24j600_read_reg(priv, ESTAT) & 0xff;
+		}
+
+		mutex_unlock(&priv->lock);
+	}
+
+	/* Enable interrupts */
+	encx24j600_cmd(priv, SETEIE);
+
+	return IRQ_HANDLED;
+}
+
+static int encx24j600_soft_reset(struct encx24j600_priv *priv)
+{
+	int ret = 0;
+	int timeout;
+	u16 eudast;
+
+	/* Write and verify a test value to EUDAST */
+	regcache_cache_bypass(priv->ctx.regmap, true);
+	timeout = 10;
+	do {
+		encx24j600_write_reg(priv, EUDAST, EUDAST_TEST_VAL);
+		eudast = encx24j600_read_reg(priv, EUDAST);
+		usleep_range(25, 100);
+	} while ((eudast != EUDAST_TEST_VAL) && --timeout);
+	regcache_cache_bypass(priv->ctx.regmap, false);
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		goto err_out;
+	}
+
+	/* Wait for CLKRDY to become set */
+	timeout = 10;
+	while (!(encx24j600_read_reg(priv, ESTAT) & CLKRDY) && --timeout)
+		usleep_range(25, 100);
+
+	if (timeout == 0) {
+		ret = -ETIMEDOUT;
+		goto err_out;
+	}
+
+	/* Issue a System Reset command */
+	encx24j600_cmd(priv, SETETHRST);
+	usleep_range(25, 100);
+
+	/* Confirm that EUDAST has 0000h after system reset */
+	if (encx24j600_read_reg(priv, EUDAST) != 0) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	/* Wait for PHY register and status bits to become available */
+	usleep_range(256, 1000);
+
+err_out:
+	return ret;
+}
+
+static int encx24j600_hw_reset(struct encx24j600_priv *priv)
+{
+	int ret;
+
+	mutex_lock(&priv->lock);
+	ret = encx24j600_soft_reset(priv);
+	mutex_unlock(&priv->lock);
+
+	return ret;
+}
+
+static void encx24j600_reset_hw_tx(struct encx24j600_priv *priv)
+{
+	encx24j600_set_bits(priv, ECON2, TXRST);
+	encx24j600_clr_bits(priv, ECON2, TXRST);
+}
+
+static void encx24j600_hw_init_tx(struct encx24j600_priv *priv)
+{
+	/* Reset TX */
+	encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TXIF and TXABTIF flags if they were previously set */
+	encx24j600_clr_bits(priv, EIR, TXIF | TXABTIF);
+
+	/* Write the Tx Buffer pointer */
+	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+}
+
+static void encx24j600_hw_init_rx(struct encx24j600_priv *priv)
+{
+	encx24j600_cmd(priv, DISABLERX);
+
+	/* Set up RX packet start address in the SRAM */
+	encx24j600_write_reg(priv, ERXST, ENC_RX_BUF_START);
+
+	/* Preload the RX Data pointer to the beginning of the RX area */
+	encx24j600_write_reg(priv, ERXRDPT, ENC_RX_BUF_START);
+
+	priv->next_packet = ENC_RX_BUF_START;
+
+	/* Set up RX end address in the SRAM */
+	encx24j600_write_reg(priv, ERXTAIL, ENC_SRAM_SIZE - 2);
+
+	/* Reset the user data pointers */
+	encx24j600_write_reg(priv, EUDAST, ENC_SRAM_SIZE);
+	encx24j600_write_reg(priv, EUDAND, ENC_SRAM_SIZE + 1);
+
+	/* Set Max Frame length */
+	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+}
+
+static void encx24j600_dump_config(struct encx24j600_priv *priv,
+				   const char *msg)
+{
+	pr_info(DRV_NAME ": %s\n", msg);
+
+	/* CHIP configuration */
+	pr_info(DRV_NAME " ECON1:   %04X\n", encx24j600_read_reg(priv, ECON1));
+	pr_info(DRV_NAME " ECON2:   %04X\n", encx24j600_read_reg(priv, ECON2));
+	pr_info(DRV_NAME " ERXFCON: %04X\n", encx24j600_read_reg(priv,
+								 ERXFCON));
+	pr_info(DRV_NAME " ESTAT:   %04X\n", encx24j600_read_reg(priv, ESTAT));
+	pr_info(DRV_NAME " EIR:     %04X\n", encx24j600_read_reg(priv, EIR));
+	pr_info(DRV_NAME " EIDLED:  %04X\n", encx24j600_read_reg(priv, EIDLED));
+
+	/* MAC layer configuration */
+	pr_info(DRV_NAME " MACON1:  %04X\n", encx24j600_read_reg(priv, MACON1));
+	pr_info(DRV_NAME " MACON2:  %04X\n", encx24j600_read_reg(priv, MACON2));
+	pr_info(DRV_NAME " MAIPG:   %04X\n", encx24j600_read_reg(priv, MAIPG));
+	pr_info(DRV_NAME " MACLCON: %04X\n", encx24j600_read_reg(priv,
+								 MACLCON));
+	pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
+								 MABBIPG));
+
+	/* PHY configuration */
+	pr_info(DRV_NAME " PHCON1:  %04X\n", encx24j600_read_phy(priv, PHCON1));
+	pr_info(DRV_NAME " PHCON2:  %04X\n", encx24j600_read_phy(priv, PHCON2));
+	pr_info(DRV_NAME " PHANA:   %04X\n", encx24j600_read_phy(priv, PHANA));
+	pr_info(DRV_NAME " PHANLPA: %04X\n", encx24j600_read_phy(priv,
+								 PHANLPA));
+	pr_info(DRV_NAME " PHANE:   %04X\n", encx24j600_read_phy(priv, PHANE));
+	pr_info(DRV_NAME " PHSTAT1: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT1));
+	pr_info(DRV_NAME " PHSTAT2: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT2));
+	pr_info(DRV_NAME " PHSTAT3: %04X\n", encx24j600_read_phy(priv,
+								 PHSTAT3));
+}
+
+static void encx24j600_set_rxfilter_mode(struct encx24j600_priv *priv)
+{
+	switch (priv->rxfilter) {
+	case RXFILTER_PROMISC:
+		encx24j600_set_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | MCEN | NOTMEEN);
+		break;
+	case RXFILTER_MULTI:
+		encx24j600_clr_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN | MCEN);
+		break;
+	case RXFILTER_NORMAL:
+	default:
+		encx24j600_clr_bits(priv, MACON1, PASSALL);
+		encx24j600_write_reg(priv, ERXFCON, UCEN | CRCEN | BCEN);
+		break;
+	}
+}
+
+static int encx24j600_hw_init(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	int ret = 0;
+	u16 eidled;
+	u16 macon2;
+
+	priv->hw_enabled = false;
+
+	eidled = encx24j600_read_reg(priv, EIDLED);
+	if (((eidled & DEVID_MASK) >> DEVID_SHIFT) != ENCX24J600_DEV_ID) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	netif_info(priv, drv, dev, "Silicon rev ID: 0x%02x\n",
+		   (eidled & REVID_MASK) >> REVID_SHIFT);
+
+	/* PHY LEDs:
+	 * LEDA: Link State + collision events
+	 * LEDB: Link State + transmit/receive events
+	 */
+	encx24j600_update_reg(priv, EIDLED, 0xff00, 0xcb00);
+
+	/* Loopback disabled */
+	encx24j600_write_reg(priv, MACON1, 0x9);
+
+	/* interpacket gap value */
+	encx24j600_write_reg(priv, MAIPG, 0x0c12);
+
+	/* Write the auto negotiation pattern */
+	encx24j600_write_phy(priv, PHANA, PHANA_DEFAULT);
+
+	encx24j600_update_phcon1(priv);
+	encx24j600_check_link_status(priv);
+
+	macon2 = MACON2_RSV1 | TXCRCEN | PADCFG0 | PADCFG2 | MACON2_DEFER;
+	if ((priv->autoneg == AUTONEG_DISABLE) && priv->full_duplex)
+		macon2 |= FULDPX;
+
+	encx24j600_set_bits(priv, MACON2, macon2);
+
+	priv->rxfilter = RXFILTER_NORMAL;
+	encx24j600_set_rxfilter_mode(priv);
+
+	/* Program the Maximum frame length */
+	encx24j600_write_reg(priv, MAMXFL, MAX_FRAMELEN);
+
+	/* Init Tx pointers */
+	encx24j600_hw_init_tx(priv);
+
+	/* Init Rx pointers */
+	encx24j600_hw_init_rx(priv);
+
+	if (netif_msg_hw(priv))
+		encx24j600_dump_config(priv, "Hw is initialized");
+
+err_out:
+	return ret;
+}
+
+static void encx24j600_hw_enable(struct encx24j600_priv *priv)
+{
+	/* Clear the interrupt flags in case any were set */
+	encx24j600_clr_bits(priv, EIR, (PCFULIF | RXABTIF | TXABTIF | TXIF |
+					PKTIF | LINKIF));
+
+	/* Enable the interrupts */
+	encx24j600_write_reg(priv, EIE, (PCFULIE | RXABTIE | TXABTIE | TXIE |
+					 PKTIE | LINKIE | INTIE));
+
+	/* Enable RX */
+	encx24j600_cmd(priv, ENABLERX);
+
+	priv->hw_enabled = true;
+}
+
+static void encx24j600_hw_disable(struct encx24j600_priv *priv)
+{
+	/* Disable all interrupts */
+	encx24j600_write_reg(priv, EIE, 0);
+
+	/* Disable RX */
+	encx24j600_cmd(priv, DISABLERX);
+
+	priv->hw_enabled = false;
+}
+
+static int encx24j600_setlink(struct net_device *dev, u8 autoneg, u16 speed,
+			      u8 duplex)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	int ret = 0;
+
+	if (!priv->hw_enabled) {
+		/* link is in low power mode now; duplex setting
+		 * will take effect on next encx24j600_hw_init()
+		 */
+		if (speed == SPEED_10 || speed == SPEED_100) {
+			priv->autoneg = (autoneg == AUTONEG_ENABLE);
+			priv->full_duplex = (duplex == DUPLEX_FULL);
+			priv->speed = speed;
+		} else {
+			/* Speeds other than SPEED_10 and SPEED_100
+			 * are not supported by the chip.
+			 */
+			netif_warn(priv, link, dev, "unsupported link speed setting\n");
+			ret = -EOPNOTSUPP;
+		}
+	} else {
+		netif_warn(priv, link, dev, "Warning: hw must be disabled to set link mode\n");
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static void encx24j600_hw_get_macaddr(struct encx24j600_priv *priv,
+				      unsigned char *ethaddr)
+{
+	unsigned short val;
+
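+	/* Each MAADRn register holds two octets of the station address,
+	 * low octet in the low byte (MAADR1 = addr[1]:addr[0], etc.).
+	 */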
+	val = encx24j600_read_reg(priv, MAADR1);
+
+	ethaddr[0] = val & 0x00ff;
+	ethaddr[1] = (val & 0xff00) >> 8;
+
+	val = encx24j600_read_reg(priv, MAADR2);
+
+	ethaddr[2] = val & 0x00ffU;
+	ethaddr[3] = (val & 0xff00U) >> 8;
+
+	val = encx24j600_read_reg(priv, MAADR3);
+
+	ethaddr[4] = val & 0x00ffU;
+	ethaddr[5] = (val & 0xff00U) >> 8;
+}
+
+/* Program the hardware MAC address from dev->dev_addr.*/
+static int encx24j600_set_hw_macaddr(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	if (priv->hw_enabled) {
+		netif_info(priv, drv, dev, "Hardware must be disabled to set Mac address\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&priv->lock);
+
+	netif_info(priv, drv, dev, "%s: Setting MAC address to %pM\n",
+		   dev->name, dev->dev_addr);
+
+	encx24j600_write_reg(priv, MAADR3, (dev->dev_addr[4] |
+			     dev->dev_addr[5] << 8));
+	encx24j600_write_reg(priv, MAADR2, (dev->dev_addr[2] |
+			     dev->dev_addr[3] << 8));
+	encx24j600_write_reg(priv, MAADR1, (dev->dev_addr[0] |
+			     dev->dev_addr[1] << 8));
+
+	mutex_unlock(&priv->lock);
+
+	return 0;
+}
+
+/* Store the new hardware address in dev->dev_addr, and update the MAC.*/
+static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *address = addr;
+
+	if (netif_running(dev))
+		return -EBUSY;
+	if (!is_valid_ether_addr(address->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+	return encx24j600_set_hw_macaddr(dev);
+}
+
+static int encx24j600_open(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	int ret = request_threaded_irq(priv->ctx.spi->irq, NULL, encx24j600_isr,
+				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				       DRV_NAME, priv);
+	if (unlikely(ret < 0)) {
+		netdev_err(dev, "request irq %d failed (ret = %d)\n",
+			   priv->ctx.spi->irq, ret);
+		return ret;
+	}
+
+	encx24j600_hw_disable(priv);
+	encx24j600_hw_init(priv);
+	encx24j600_hw_enable(priv);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+static int encx24j600_stop(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	free_irq(priv->ctx.spi->irq, priv);
+	return 0;
+}
+
+static void encx24j600_setrx_proc(struct kthread_work *ws)
+{
+	struct encx24j600_priv *priv =
+			container_of(ws, struct encx24j600_priv, setrx_work);
+
+	mutex_lock(&priv->lock);
+	encx24j600_set_rxfilter_mode(priv);
+	mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_set_multicast_list(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	int oldfilter = priv->rxfilter;
+
+	if (dev->flags & IFF_PROMISC) {
+		netif_dbg(priv, link, dev, "promiscuous mode\n");
+		priv->rxfilter = RXFILTER_PROMISC;
+	} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
+		netif_dbg(priv, link, dev, "%smulticast mode\n",
+			  (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+		priv->rxfilter = RXFILTER_MULTI;
+	} else {
+		netif_dbg(priv, link, dev, "normal mode\n");
+		priv->rxfilter = RXFILTER_NORMAL;
+	}
+
+	if (oldfilter != priv->rxfilter)
+		queue_kthread_work(&priv->kworker, &priv->setrx_work);
+}
+
+static void encx24j600_hw_tx(struct encx24j600_priv *priv)
+{
+	struct net_device *dev = priv->ndev;
+	netif_info(priv, tx_queued, dev, "TX Packet Len:%d\n",
+		   priv->tx_skb->len);
+
+	if (netif_msg_pktdata(priv))
+		dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
+
+	if (encx24j600_read_reg(priv, EIR) & TXABTIF)
+		/* Last transmission aborted due to an error. Reset the TX interface */
+		encx24j600_reset_hw_tx(priv);
+
+	/* Clear the TXIF flag if it was previously set */
+	encx24j600_clr_bits(priv, EIR, TXIF);
+
+	/* Set the data pointer to the TX buffer address in the SRAM */
+	encx24j600_write_reg(priv, EGPWRPT, ENC_TX_BUF_START);
+
+	/* Copy the packet into the SRAM */
+	encx24j600_raw_write(priv, WGPDATA, (u8 *)priv->tx_skb->data,
+			     priv->tx_skb->len);
+
+	/* Program the Tx buffer start pointer */
+	encx24j600_write_reg(priv, ETXST, ENC_TX_BUF_START);
+
+	/* Program the packet length */
+	encx24j600_write_reg(priv, ETXLEN, priv->tx_skb->len);
+
+	/* Start the transmission */
+	encx24j600_cmd(priv, SETTXRTS);
+}
+
+static void encx24j600_tx_proc(struct kthread_work *ws)
+{
+	struct encx24j600_priv *priv =
+			container_of(ws, struct encx24j600_priv, tx_work);
+
+	mutex_lock(&priv->lock);
+	encx24j600_hw_tx(priv);
+	mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	/* save the timestamp */
+	dev->trans_start = jiffies;
+
+	/* Remember the skb for deferred processing */
+	priv->tx_skb = skb;
+
+	queue_kthread_work(&priv->kworker, &priv->tx_work);
+
+	return NETDEV_TX_OK;
+}
+
+/* Deal with a transmit timeout */
+static void encx24j600_tx_timeout(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
+		  jiffies, jiffies - dev->trans_start);
+
+	dev->stats.tx_errors++;
+	netif_wake_queue(dev);
+	return;
+}
+
+static int encx24j600_get_regs_len(struct net_device *dev)
+{
+	return SFR_REG_COUNT;
+}
+
+static void encx24j600_get_regs(struct net_device *dev,
+				struct ethtool_regs *regs, void *p)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	u16 *buff = p;
+	u8 reg;
+
+	regs->version = 1;
+	mutex_lock(&priv->lock);
+	for (reg = 0; reg < SFR_REG_COUNT; reg += 2) {
+		unsigned int val = 0;
+		/* ignore errors for unreadable registers */
+		regmap_read(priv->ctx.regmap, reg, &val);
+		buff[reg / 2] = val & 0xffff;
+	}
+	mutex_unlock(&priv->lock);
+}
+
+static void encx24j600_get_drvinfo(struct net_device *dev,
+				   struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, dev_name(dev->dev.parent),
+		sizeof(info->bus_info));
+}
+
+static int encx24j600_get_settings(struct net_device *dev,
+				   struct ethtool_cmd *cmd)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+			 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
+			 SUPPORTED_Autoneg | SUPPORTED_TP;
+
+	ethtool_cmd_speed_set(cmd, priv->speed);
+	cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = PORT_TP;
+	cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+	return 0;
+}
+
+static int encx24j600_set_settings(struct net_device *dev,
+				   struct ethtool_cmd *cmd)
+{
+	return encx24j600_setlink(dev, cmd->autoneg,
+				  ethtool_cmd_speed(cmd), cmd->duplex);
+}
+
+static u32 encx24j600_get_msglevel(struct net_device *dev)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	return priv->msg_enable;
+}
+
+static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
+{
+	struct encx24j600_priv *priv = netdev_priv(dev);
+	priv->msg_enable = val;
+}
+
+static const struct ethtool_ops encx24j600_ethtool_ops = {
+	.get_settings = encx24j600_get_settings,
+	.set_settings = encx24j600_set_settings,
+	.get_drvinfo = encx24j600_get_drvinfo,
+	.get_msglevel = encx24j600_get_msglevel,
+	.set_msglevel = encx24j600_set_msglevel,
+	.get_regs_len = encx24j600_get_regs_len,
+	.get_regs = encx24j600_get_regs,
+};
+
+static const struct net_device_ops encx24j600_netdev_ops = {
+	.ndo_open = encx24j600_open,
+	.ndo_stop = encx24j600_stop,
+	.ndo_start_xmit = encx24j600_tx,
+	.ndo_set_rx_mode = encx24j600_set_multicast_list,
+	.ndo_set_mac_address = encx24j600_set_mac_address,
+	.ndo_tx_timeout = encx24j600_tx_timeout,
+	.ndo_validate_addr = eth_validate_addr,
+};
+
+static int encx24j600_spi_probe(struct spi_device *spi)
+{
+	int ret;
+
+	struct net_device *ndev;
+	struct encx24j600_priv *priv;
+
+	ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
+
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
+
+	priv = netdev_priv(ndev);
+	spi_set_drvdata(spi, priv);
+	dev_set_drvdata(&spi->dev, priv);
+	SET_NETDEV_DEV(ndev, &spi->dev);
+
+	priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+	priv->ndev = ndev;
+
+	/* Default PHY configuration */
+	priv->full_duplex = true;
+	priv->autoneg = AUTONEG_ENABLE;
+	priv->speed = SPEED_100;
+
+	priv->ctx.spi = spi;
+	devm_regmap_init_encx24j600(&spi->dev, &priv->ctx);
+	ndev->irq = spi->irq;
+	ndev->netdev_ops = &encx24j600_netdev_ops;
+
+	mutex_init(&priv->lock);
+
+	/* Reset device and check if it is connected */
+	if (encx24j600_hw_reset(priv)) {
+		netif_err(priv, probe, ndev,
+			  DRV_NAME ": Chip is not detected\n");
+		ret = -EIO;
+		goto out_free;
+	}
+
+	/* Initialize the device HW to a consistent state */
+	if (encx24j600_hw_init(priv)) {
+		netif_err(priv, probe, ndev,
+			  DRV_NAME ": HW initialization error\n");
+		ret = -EIO;
+		goto out_free;
+	}
+
+	init_kthread_worker(&priv->kworker);
+	init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
+	init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+
+	priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
+					 "encx24j600");
+
+	if (IS_ERR(priv->kworker_task)) {
+		ret = PTR_ERR(priv->kworker_task);
+		goto out_free;
+	}
+
+	/* Get the MAC address from the chip */
+	encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+
+	ndev->ethtool_ops = &encx24j600_ethtool_ops;
+
+	ret = register_netdev(ndev);
+	if (unlikely(ret)) {
+		netif_err(priv, probe, ndev, "Error %d initializing encx24j600 card\n",
+			  ret);
+		goto out_free;
+	}
+
+	netif_info(priv, drv, priv->ndev, "MAC address %pM\n", ndev->dev_addr);
+
+	return ret;
+
+out_free:
+	free_netdev(ndev);
+
+error_out:
+	return ret;
+}
+
+static int encx24j600_spi_remove(struct spi_device *spi)
+{
+	struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);
+
+	unregister_netdev(priv->ndev);
+
+	free_netdev(priv->ndev);
+
+	return 0;
+}
+
+static const struct spi_device_id encx24j600_spi_id_table[] = {
+	{ .name = "encx24j600" },
+	{ /* sentinel */ }
+};
+
+static struct spi_driver encx24j600_spi_net_driver = {
+	.driver = {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+		.bus	= &spi_bus_type,
+	},
+	.probe		= encx24j600_spi_probe,
+	.remove		= encx24j600_spi_remove,
+	.id_table	= encx24j600_spi_id_table,
+};
+
+static int __init encx24j600_init(void)
+{
+	return spi_register_driver(&encx24j600_spi_net_driver);
+}
+module_init(encx24j600_init);
+
+static void encx24j600_exit(void)
+{
+	spi_unregister_driver(&encx24j600_spi_net_driver);
+}
+module_exit(encx24j600_exit);
+
+MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
+MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
new file mode 100644
index 0000000..4be73d5
--- /dev/null
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -0,0 +1,437 @@
+/**
+ * encx24j600_hw.h: Register definitions
+ *
+ */
+
+#ifndef _ENCX24J600_HW_H
+#define _ENCX24J600_HW_H
+
+struct encx24j600_context {
+	struct spi_device *spi;
+	struct regmap *regmap;
+	struct regmap *phymap;
+	struct mutex mutex; /* mutex to protect access to regmap */
+	int bank;
+};
+
+void devm_regmap_init_encx24j600(struct device *dev,
+				 struct encx24j600_context *ctx);
+
+/* Single-byte instructions */
+#define BANK_SELECT(bank) (0xC0 | ((bank & (BANK_MASK >> BANK_SHIFT)) << 1))
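+/* e.g. BANK_SELECT(2) == 0xC4 == B2SEL */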
+#define B0SEL 0xC0		/* Bank 0 Select */
+#define B1SEL 0xC2		/* Bank 1 Select */
+#define B2SEL 0xC4		/* Bank 2 Select */
+#define B3SEL 0xC6		/* Bank 3 Select */
+#define SETETHRST 0xCA		/* System Reset */
+#define FCDISABLE 0xE0		/* Flow Control Disable */
+#define FCSINGLE 0xE2		/* Flow Control Single */
+#define FCMULTIPLE 0xE4		/* Flow Control Multiple */
+#define FCCLEAR 0xE6		/* Flow Control Clear */
+#define SETPKTDEC 0xCC		/* Decrement Packet Counter */
+#define DMASTOP 0xD2		/* DMA Stop */
+#define DMACKSUM 0xD8		/* DMA Start Checksum */
+#define DMACKSUMS 0xDA		/* DMA Start Checksum with Seed */
+#define DMACOPY 0xDC		/* DMA Start Copy */
+#define DMACOPYS 0xDE		/* DMA Start Copy and Checksum with Seed */
+#define SETTXRTS 0xD4		/* Request Packet Transmission */
+#define ENABLERX 0xE8		/* Enable RX */
+#define DISABLERX 0xEA		/* Disable RX */
+#define SETEIE 0xEC		/* Enable Interrupts */
+#define CLREIE 0xEE		/* Disable Interrupts */
+
+/* Two byte instructions */
+#define RBSEL 0xC8		/* Read Bank Select */
+
+/* Three byte instructions */
+#define WGPRDPT 0x60		/* Write EGPRDPT */
+#define RGPRDPT 0x62		/* Read EGPRDPT */
+#define WRXRDPT 0x64		/* Write ERXRDPT */
+#define RRXRDPT 0x66		/* Read ERXRDPT */
+#define WUDARDPT 0x68		/* Write EUDARDPT */
+#define RUDARDPT 0x6A		/* Read EUDARDPT */
+#define WGPWRPT 0x6C		/* Write EGPWRPT */
+#define RGPWRPT 0x6E		/* Read EGPWRPT */
+#define WRXWRPT 0x70		/* Write ERXWRPT */
+#define RRXWRPT 0x72		/* Read ERXWRPT */
+#define WUDAWRPT 0x74		/* Write EUDAWRPT */
+#define RUDAWRPT 0x76		/* Read EUDAWRPT */
+
+/* n byte instructions */
+#define RCRCODE 0x00
+#define WCRCODE 0x40
+#define BFSCODE 0x80
+#define BFCCODE 0xA0
+#define RCR(addr) (RCRCODE | ((addr) & ADDR_MASK)) /* Read Control Register */
+#define WCR(addr) (WCRCODE | ((addr) & ADDR_MASK)) /* Write Control Register */
+#define RCRU 0x20		/* Read Control Register Unbanked */
+#define WCRU 0x22		/* Write Control Register Unbanked */
+#define BFS(addr) (BFSCODE | ((addr) & ADDR_MASK)) /* Bit Field Set */
+#define BFC(addr) (BFCCODE | ((addr) & ADDR_MASK)) /* Bit Field Clear */
+#define BFSU 0x24		/* Bit Field Set Unbanked */
+#define BFCU 0x26		/* Bit Field Clear Unbanked */
+#define RGPDATA 0x28		/* Read EGPDATA */
+#define WGPDATA 0x2A		/* Write EGPDATA */
+#define RRXDATA 0x2C		/* Read ERXDATA */
+#define WRXDATA 0x2E		/* Write ERXDATA */
+#define RUDADATA 0x30		/* Read EUDADATA */
+#define WUDADATA 0x32		/* Write EUDADATA */
+
+#define SFR_REG_COUNT	0xA0
+
+/* ENC424J600 Control Registers
+ * Control register definitions are a combination of address
+ * and bank number
+ * - Register address (bits 0-4)
+ * - Bank number (bits 5-6)
+ */
+#define ADDR_MASK 0x1F
+#define BANK_MASK 0x60
+#define BANK_SHIFT 5
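+
+/* Example (illustrative only): MIREGADR below is (0x14 | 0x40), i.e.
+ * register address 0x14 in bank 2.  A banked read of it pairs a bank
+ * select with a read-control-register instruction:
+ *
+ *	u8 bank = (MIREGADR & BANK_MASK) >> BANK_SHIFT;		(== 2)
+ *	u8 cmd[2] = { BANK_SELECT(bank), RCR(MIREGADR) };	(== { B2SEL, 0x14 })
+ */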
+
+/* All-bank registers */
+#define EUDAST 0x16
+#define EUDAND 0x18
+#define ESTAT 0x1A
+#define EIR 0x1C
+#define ECON1 0x1E
+
+/* Bank 0 registers */
+#define ETXST (0x00 | 0x00)
+#define ETXLEN (0x02 | 0x00)
+#define ERXST (0x04 | 0x00)
+#define ERXTAIL (0x06 | 0x00)
+#define ERXHEAD (0x08 | 0x00)
+#define EDMAST (0x0A | 0x00)
+#define EDMALEN (0x0C | 0x00)
+#define EDMADST (0x0E | 0x00)
+#define EDMACS (0x10 | 0x00)
+#define ETXSTAT (0x12 | 0x00)
+#define ETXWIRE (0x14 | 0x00)
+
+/* Bank 1 registers */
+#define EHT1 (0x00 | 0x20)
+#define EHT2 (0x02 | 0x20)
+#define EHT3 (0x04 | 0x20)
+#define EHT4 (0x06 | 0x20)
+#define EPMM1 (0x08 | 0x20)
+#define EPMM2 (0x0A | 0x20)
+#define EPMM3 (0x0C | 0x20)
+#define EPMM4 (0x0E | 0x20)
+#define EPMCS (0x10 | 0x20)
+#define EPMO (0x12 | 0x20)
+#define ERXFCON (0x14 | 0x20)
+
+/* Bank 2 registers */
+#define MACON1 (0x00 | 0x40)
+#define MACON2 (0x02 | 0x40)
+#define MABBIPG (0x04 | 0x40)
+#define MAIPG (0x06 | 0x40)
+#define MACLCON (0x08 | 0x40)
+#define MAMXFL (0x0A | 0x40)
+#define MICMD (0x12 | 0x40)
+#define MIREGADR (0x14 | 0x40)
+
+/* Bank 3 registers */
+#define MAADR3 (0x00 | 0x60)
+#define MAADR2 (0x02 | 0x60)
+#define MAADR1 (0x04 | 0x60)
+#define MIWR (0x06 | 0x60)
+#define MIRD (0x08 | 0x60)
+#define MISTAT (0x0A | 0x60)
+#define EPAUS (0x0C | 0x60)
+#define ECON2 (0x0E | 0x60)
+#define ERXWM (0x10 | 0x60)
+#define EIE (0x12 | 0x60)
+#define EIDLED (0x14 | 0x60)
+
+/* Unbanked registers */
+#define EGPDATA (0x00 | 0x80)
+#define ERXDATA (0x02 | 0x80)
+#define EUDADATA (0x04 | 0x80)
+#define EGPRDPT (0x06 | 0x80)
+#define EGPWRPT (0x08 | 0x80)
+#define ERXRDPT (0x0A | 0x80)
+#define ERXWRPT (0x0C | 0x80)
+#define EUDARDPT (0x0E | 0x80)
+#define EUDAWRPT (0x10 | 0x80)
+
+/* Register bit definitions */
+/* ESTAT */
+#define INT (1 << 15)
+#define FCIDLE (1 << 14)
+#define RXBUSY (1 << 13)
+#define CLKRDY (1 << 12)
+#define PHYDPX (1 << 10)
+#define PHYLNK (1 << 8)
+
+/* EIR */
+#define CRYPTEN (1 << 15)
+#define MODEXIF (1 << 14)
+#define HASHIF (1 << 13)
+#define AESIF (1 << 12)
+#define LINKIF (1 << 11)
+#define PKTIF (1 << 6)
+#define DMAIF (1 << 5)
+#define TXIF (1 << 3)
+#define TXABTIF (1 << 2)
+#define RXABTIF (1 << 1)
+#define PCFULIF (1 << 0)
+
+/* ECON1 */
+#define MODEXST (1 << 15)
+#define HASHEN (1 << 14)
+#define HASHOP (1 << 13)
+#define HASHLST (1 << 12)
+#define AESST (1 << 11)
+#define AESOP1 (1 << 10)
+#define AESOP0 (1 << 9)
+#define PKTDEC (1 << 8)
+#define FCOP1 (1 << 7)
+#define FCOP0 (1 << 6)
+#define DMAST (1 << 5)
+#define DMACPY (1 << 4)
+#define DMACSSD (1 << 3)
+#define DMANOCS (1 << 2)
+#define TXRTS (1 << 1)
+#define RXEN (1 << 0)
+
+/* ETXSTAT */
+#define LATECOL (1 << 10)
+#define MAXCOL (1 << 9)
+#define EXDEFER (1 << 8)
+#define ETXSTATL_DEFER (1 << 7)
+#define CRCBAD (1 << 4)
+#define COLCNT_MASK 0xF
+
+/* ERXFCON */
+#define HTEN (1 << 15)
+#define MPEN (1 << 14)
+#define NOTPM (1 << 12)
+#define PMEN3 (1 << 11)
+#define PMEN2 (1 << 10)
+#define PMEN1 (1 << 9)
+#define PMEN0 (1 << 8)
+#define CRCEEN (1 << 7)
+#define CRCEN (1 << 6)
+#define RUNTEEN (1 << 5)
+#define RUNTEN (1 << 4)
+#define UCEN (1 << 3)
+#define NOTMEEN (1 << 2)
+#define MCEN (1 << 1)
+#define BCEN (1 << 0)
+
+/* MACON1 */
+#define LOOPBK (1 << 4)
+#define RXPAUS (1 << 2)
+#define PASSALL (1 << 1)
+
+/* MACON2 */
+#define MACON2_DEFER (1 << 14)
+#define BPEN (1 << 13)
+#define NOBKOFF (1 << 12)
+#define PADCFG2 (1 << 7)
+#define PADCFG1 (1 << 6)
+#define PADCFG0 (1 << 5)
+#define TXCRCEN (1 << 4)
+#define PHDREN (1 << 3)
+#define HFRMEN (1 << 2)
+#define MACON2_RSV1 (1 << 1)
+#define FULDPX (1 << 0)
+
+/* MAIPG */
+/* The value of the high byte is given by the reserved bits;
+ * the value of the low byte is the recommended setting of
+ * the IPG parameter.
+ */
+#define MAIPGH_VAL 0x0C
+#define MAIPGL_VAL 0x12
+
+/* MIREGADRH */
+#define MIREGADR_VAL (1 << 8)
+
+/* MIREGADRL */
+#define PHREG_MASK 0x1F
+
+/* MICMD */
+#define MIISCAN (1 << 1)
+#define MIIRD (1 << 0)
+
+/* MISTAT */
+#define NVALID (1 << 2)
+#define SCAN (1 << 1)
+#define BUSY (1 << 0)
+
+/* ECON2 */
+#define ETHEN (1 << 15)
+#define STRCH (1 << 14)
+#define TXMAC (1 << 13)
+#define SHA1MD5 (1 << 12)
+#define COCON3 (1 << 11)
+#define COCON2 (1 << 10)
+#define COCON1 (1 << 9)
+#define COCON0 (1 << 8)
+#define AUTOFC (1 << 7)
+#define TXRST (1 << 6)
+#define RXRST (1 << 5)
+#define ETHRST (1 << 4)
+#define MODLEN1 (1 << 3)
+#define MODLEN0 (1 << 2)
+#define AESLEN1 (1 << 1)
+#define AESLEN0 (1 << 0)
+
+/* EIE */
+#define INTIE (1 << 15)
+#define MODEXIE (1 << 14)
+#define HASHIE (1 << 13)
+#define AESIE (1 << 12)
+#define LINKIE (1 << 11)
+#define PKTIE (1 << 6)
+#define DMAIE (1 << 5)
+#define TXIE (1 << 3)
+#define TXABTIE (1 << 2)
+#define RXABTIE (1 << 1)
+#define PCFULIE (1 << 0)
+
+/* EIDLED */
+#define LACFG3 (1 << 15)
+#define LACFG2 (1 << 14)
+#define LACFG1 (1 << 13)
+#define LACFG0 (1 << 12)
+#define LBCFG3 (1 << 11)
+#define LBCFG2 (1 << 10)
+#define LBCFG1 (1 << 9)
+#define LBCFG0 (1 << 8)
+#define DEVID_SHIFT 5
+#define DEVID_MASK (0x7 << DEVID_SHIFT)
+#define REVID_SHIFT 0
+#define REVID_MASK (0x1F << REVID_SHIFT)
+
+/* PHY registers */
+#define PHCON1 0x00
+#define PHSTAT1 0x01
+#define PHANA 0x04
+#define PHANLPA 0x05
+#define PHANE 0x06
+#define PHCON2 0x11
+#define PHSTAT2 0x1B
+#define PHSTAT3 0x1F
+
+/* PHCON1 */
+#define PRST (1 << 15)
+#define PLOOPBK (1 << 14)
+#define SPD100 (1 << 13)
+#define ANEN (1 << 12)
+#define PSLEEP (1 << 11)
+#define RENEG (1 << 9)
+#define PFULDPX (1 << 8)
+
+/* PHSTAT1 */
+#define FULL100 (1 << 14)
+#define HALF100 (1 << 13)
+#define FULL10 (1 << 12)
+#define HALF10 (1 << 11)
+#define ANDONE (1 << 5)
+#define LRFAULT (1 << 4)
+#define ANABLE (1 << 3)
+#define LLSTAT (1 << 2)
+#define EXTREGS (1 << 0)
+
+/* PHSTAT2 */
+#define PLRITY (1 << 4)
+
+/* PHSTAT3 */
+#define PHY3SPD100 (1 << 3)
+#define PHY3DPX (1 << 4)
+#define SPDDPX_SHIFT 2
+#define SPDDPX_MASK (0x7 << SPDDPX_SHIFT)
+
+/* PHANA */
+/* Default value for PHY initialization */
+#define PHANA_DEFAULT 0x05E1
+
+/* PHANE */
+#define PDFLT (1 << 4)
+#define LPARCD (1 << 1)
+#define LPANABL (1 << 0)
+
+#define EUDAST_TEST_VAL 0x1234
+
+#define TSV_SIZE 7
+
+#define ENCX24J600_DEV_ID 0x1
+
+/* Configuration */
+
+/* LED is on when the link is present and is driven low
+ * temporarily when a packet is transmitted or received.
+ */
+#define LED_A_SETTINGS 0xC
+
+/* LED is on if the link is in 100 Mbps mode */
+#define LED_B_SETTINGS 0x8
+
+/* Maximum Ethernet frame length.
+ * Currently not used as a limit anywhere, since we rely on
+ * the "huge frame enable" feature of the enc424j600.
+ */
+#define MAX_FRAMELEN 1518
+
+/* Size in bytes of the receive buffer in enc424j600.
+ * Must be word aligned (even).
+ */
+#define RX_BUFFER_SIZE (15 * MAX_FRAMELEN)
+
+/* Start of the general purpose area in sram */
+#define SRAM_GP_START 0x0
+
+/* SRAM size */
+#define SRAM_SIZE 0x6000
+
+/* Start of the receive buffer */
+#define ERXST_VAL (SRAM_SIZE - RX_BUFFER_SIZE)
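+
+/* With the values above, ERXST_VAL = 0x6000 - 15 * 1518 = 0x070E; both
+ * RX_BUFFER_SIZE (22770) and ERXST_VAL are even, so the word-alignment
+ * requirement on the receive buffer is met.
+ */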
+
+#define RSV_RXLONGEVDROPEV	16
+#define RSV_CARRIEREV		18
+#define RSV_CRCERROR		20
+#define RSV_LENCHECKERR		21
+#define RSV_LENOUTOFRANGE	22
+#define RSV_RXOK		23
+#define RSV_RXMULTICAST		24
+#define RSV_RXBROADCAST		25
+#define RSV_DRIBBLENIBBLE	26
+#define RSV_RXCONTROLFRAME	27
+#define RSV_RXPAUSEFRAME	28
+#define RSV_RXUNKNOWNOPCODE	29
+#define RSV_RXTYPEVLAN		30
+
+#define RSV_RUNTFILTERMATCH	31
+#define RSV_NOTMEFILTERMATCH	32
+#define RSV_HASHFILTERMATCH	33
+#define RSV_MAGICPKTFILTERMATCH	34
+#define RSV_PTRNMTCHFILTERMATCH	35
+#define RSV_UNICASTFILTERMATCH	36
+
+#define RSV_SIZE		8
+#define RSV_BITMASK(x)		(1 << ((x) - 16))
+#define RSV_GETBIT(x, y)	(((x) & RSV_BITMASK(y)) ? 1 : 0)
+
+struct rsv {
+	u16 next_packet;
+	u16 len;
+	u32 rxstat;
+};
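+
+/* Example (illustrative only): testing receive-status bits of a parsed RSV.
+ *
+ *	struct rsv *rsv = ...;
+ *
+ *	if (!RSV_GETBIT(rsv->rxstat, RSV_RXOK) ||
+ *	    RSV_GETBIT(rsv->rxstat, RSV_CRCERROR))
+ *		... drop the frame ...
+ */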
+
+/* Put RX buffer at 0 as suggested by the Errata datasheet */
+
+#define RXSTART_INIT		ERXST_VAL
+#define RXEND_INIT		0x5FFF
+
+int regmap_encx24j600_spi_write(void *context, u8 reg, const u8 *data,
+				size_t count);
+int regmap_encx24j600_spi_read(void *context, u8 reg, u8 *data, size_t count);
+
+#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 2d1b942..9ba9758 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5389,8 +5389,6 @@
 	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
 	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
 	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
-	info->regdump_len = XENA_REG_SPACE;
-	info->eedump_len = XENA_EEPROM_SPACE;
 }
 
 /**
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index be916eb..9a29670 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -105,10 +105,6 @@
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
 	strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
-	info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
-				* vdev->no_of_vpath;
-
-	info->n_stats = STAT_LEN;
 }
 
 /**
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 66fd868..b159ef8 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -476,13 +476,12 @@
 	mac[5] = tmp >> 8;
 }
 
-static void __lpc_eth_clock_enable(struct netdata_local *pldat,
-				   bool enable)
+static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
 {
 	if (enable)
-		clk_enable(pldat->clk);
+		clk_prepare_enable(pldat->clk);
 	else
-		clk_disable(pldat->clk);
+		clk_disable_unprepare(pldat->clk);
 }
 
 static void __lpc_params_setup(struct netdata_local *pldat)
@@ -1494,7 +1493,7 @@
 err_out_iounmap:
 	iounmap(pldat->net_base);
 err_out_disable_clocks:
-	clk_disable(pldat->clk);
+	clk_disable_unprepare(pldat->clk);
 	clk_put(pldat->clk);
 err_out_free_dev:
 	free_netdev(ndev);
@@ -1519,7 +1518,7 @@
 	iounmap(pldat->net_base);
 	mdiobus_unregister(pldat->mii_bus);
 	mdiobus_free(pldat->mii_bus);
-	clk_disable(pldat->clk);
+	clk_disable_unprepare(pldat->clk);
 	clk_put(pldat->clk);
 	free_netdev(ndev);
 
@@ -1540,7 +1539,7 @@
 		if (netif_running(ndev)) {
 			netif_device_detach(ndev);
 			__lpc_eth_shutdown(pldat);
-			clk_disable(pldat->clk);
+			clk_disable_unprepare(pldat->clk);
 
 			/*
 			 * Reset again now clock is disable to be sure
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 7bf9c02..c177c7c 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1344,10 +1344,6 @@
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
 	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
-	info->n_stats = 0;
-	info->testinfo_len = 0;
-	info->regdump_len = 0;
-	info->eedump_len = 0;
 }
 
 static int octeon_mgmt_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index f6fcf74..b19be7c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -164,7 +164,6 @@
 	strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
 }
 
 /**
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index f1f0108..30a6f24 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -91,4 +91,15 @@
 	---help---
 	  This enables the support for NetXen's Gigabit Ethernet card.
 
+config QED
+	tristate "QLogic QED 25/40/100Gb core driver"
+	depends on PCI
+	---help---
+	  This enables the support for ...
+
+config QEDE
+	tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+	depends on QED
+	---help---
+	  This enables the support for ...
 endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index b2a283d..cee90e0 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -6,3 +6,5 @@
 obj-$(CONFIG_QLCNIC) += qlcnic/
 obj-$(CONFIG_QLGE) += qlge/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 87e073c6..f903446 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -93,8 +93,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
-	drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 0000000..5c2fd57
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
+	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 0000000..ac17d86
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,496 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+#define DRV_MODULE_VERSION "8.4.0.0"
+
+#define MAX_HWFNS_PER_DEVICE    (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+/* CAU states */
+enum qed_coalescing_mode {
+	QED_COAL_MODE_DISABLE,
+	QED_COAL_MODE_ENABLE
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+
+/* helpers */
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+	return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
+	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
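+
+/* Example (illustrative only): with the default cache_shift of 7, i.e.
+ * 128 byte cache lines (see qed_init_struct()), a 300 byte type is
+ * padded up to the next cache-line multiple:
+ *
+ *	(300 + 128 - 1) & ~(128 - 1) == 384
+ */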
+
+#define for_each_hwfn(cdev, i)  for (i = 0; i < cdev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+	(val == (cond1) ? true1 :		      \
+	 (val == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_mcp_info;
+
+struct qed_rt_data {
+	u32 init_val;
+	bool b_valid;
+};
+
+/* The PCI personality is not quite synonymous with protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may also support the RoCE protocol
+ */
+enum qed_pci_personality {
+	QED_PCI_ETH,
+	QED_PCI_DEFAULT /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+	u32 cids;
+	u32 vf_cids;
+	u32 tids;
+};
+
+enum QED_RESOURCES {
+	QED_SB,
+	QED_L2_QUEUE,
+	QED_VPORT,
+	QED_RSS_ENG,
+	QED_PQ,
+	QED_RL,
+	QED_MAC,
+	QED_VLAN,
+	QED_ILT,
+	QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+	QED_PF_L2_QUE,
+	QED_MAX_FEATURES,
+};
+
+enum QED_PORT_MODE {
+	QED_PORT_MODE_DE_2X40G,
+	QED_PORT_MODE_DE_2X50G,
+	QED_PORT_MODE_DE_1X100G,
+	QED_PORT_MODE_DE_4X10G_F,
+	QED_PORT_MODE_DE_4X10G_E,
+	QED_PORT_MODE_DE_4X20G,
+	QED_PORT_MODE_DE_1X40G,
+	QED_PORT_MODE_DE_2X25G,
+	QED_PORT_MODE_DE_1X25G
+};
+
+struct qed_hw_info {
+	/* PCI personality */
+	enum qed_pci_personality	personality;
+
+	/* Resource Allocation scheme results */
+	u32				resc_start[QED_MAX_RESC];
+	u32				resc_num[QED_MAX_RESC];
+	u32				feat_num[QED_MAX_FEATURES];
+
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+	u8				num_tc;
+	u8				offload_tc;
+	u8				non_offload_tc;
+
+	u32				concrete_fid;
+	u16				opaque_fid;
+	u16				ovlan;
+	u32				part_num[4];
+
+	u32				vendor_id;
+	u32				device_id;
+
+	unsigned char			hw_mac_addr[ETH_ALEN];
+
+	struct qed_igu_info		*p_igu_info;
+
+	u32				port_mode;
+	u32				hw_mode;
+};
+
+struct qed_hw_cid_data {
+	u32	cid;
+	bool	b_cid_allocated;
+
+	/* Additional identifiers */
+	u16	opaque_fid;
+	u8	vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE        0x2000
+
+struct qed_dmae_info {
+	/* Mutex for synchronizing access to functions */
+	struct mutex	mutex;
+
+	u8		channel;
+
+	dma_addr_t	completion_word_phys_addr;
+
+	/* The memory location where the DMAE writes the completion
+	 * value when an operation is finished on this context.
+	 */
+	u32		*p_completion_word;
+
+	dma_addr_t	intermediate_buffer_phys_addr;
+
+	/* An intermediate buffer for DMAE operations that use virtual
+	 * addresses - data is DMA'd to/from this buffer and then
+	 * memcpy'd to/from the virtual address
+	 */
+	u32		*p_intermediate_buffer;
+
+	dma_addr_t	dmae_cmd_phys_addr;
+	struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_qm_info {
+	struct init_qm_pq_params	*qm_pq_params;
+	struct init_qm_vport_params	*qm_vport_params;
+	struct init_qm_port_params	*qm_port_params;
+	u16				start_pq;
+	u8				start_vport;
+	u8				pure_lb_pq;
+	u8				offload_pq;
+	u8				pure_ack_pq;
+	u8				vf_queues_offset;
+	u16				num_pqs;
+	u16				num_vf_pqs;
+	u8				num_vports;
+	u8				max_phys_tcs_per_port;
+	bool				pf_rl_en;
+	bool				pf_wfq_en;
+	bool				vport_rl_en;
+	bool				vport_wfq_en;
+	u8				pf_wfq;
+	u32				pf_rl;
+};
+
+struct storm_stats {
+	u32     address;
+	u32     len;
+};
+
+struct qed_storm_stats {
+	struct storm_stats mstats;
+	struct storm_stats pstats;
+	struct storm_stats tstats;
+	struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+	struct fw_ver_info	*fw_ver_info;
+	const u8		*modes_tree_buf;
+	union init_op		*init_ops;
+	const u32		*arr_data;
+	u32			init_ops_size;
+};
+
+struct qed_simd_fp_handler {
+	void	*token;
+	void	(*func)(void *);
+};
+
+struct qed_hwfn {
+	struct qed_dev			*cdev;
+	u8				my_id;          /* ID inside the PF */
+#define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
+	u8				rel_pf_id;      /* Relative to engine */
+	u8				abs_pf_id;
+#define QED_PATH_ID(_p_hwfn)		((_p_hwfn)->abs_pf_id & 1)
+	u8				port_id;
+	bool				b_active;
+
+	u32				dp_module;
+	u8				dp_level;
+	char				name[NAME_SIZE];
+
+	bool				first_on_engine;
+	bool				hw_init_done;
+
+	/* BAR access */
+	void __iomem			*regview;
+	void __iomem			*doorbells;
+	u64				db_phys_addr;
+	unsigned long			db_size;
+
+	/* PTT pool */
+	struct qed_ptt_pool		*p_ptt_pool;
+
+	/* HW info */
+	struct qed_hw_info		hw_info;
+
+	/* rt_array (for init-tool) */
+	struct qed_rt_data		*rt_data;
+
+	/* SPQ */
+	struct qed_spq			*p_spq;
+
+	/* EQ */
+	struct qed_eq			*p_eq;
+
+	/* Consolidation queue */
+	struct qed_consq		*p_consq;
+
+	/* Slow-Path definitions */
+	struct tasklet_struct		*sp_dpc;
+	bool				b_sp_dpc_enabled;
+
+	struct qed_ptt			*p_main_ptt;
+	struct qed_ptt			*p_dpc_ptt;
+
+	struct qed_sb_sp_info		*p_sp_sb;
+	struct qed_sb_attn_info		*p_sb_attn;
+
+	/* Protocol related */
+	struct qed_pf_params		pf_params;
+
+	/* Array of sb_info of all status blocks */
+	struct qed_sb_info		*sbs_info[MAX_SB_PER_PF_MIMD];
+	u16				num_sbs;
+
+	struct qed_cxt_mngr		*p_cxt_mngr;
+
+	/* Flag indicating whether interrupts are enabled or not */
+	bool				b_int_enabled;
+
+	struct qed_mcp_info		*mcp_info;
+
+	struct qed_hw_cid_data		*p_tx_cids;
+	struct qed_hw_cid_data		*p_rx_cids;
+
+	struct qed_dmae_info		dmae_info;
+
+	/* QM init */
+	struct qed_qm_info		qm_info;
+	struct qed_storm_stats		storm_stats;
+
+	/* Buffer for unzipping firmware data */
+	void				*unzip_buf;
+
+	struct qed_simd_fp_handler	simd_proto_handler[64];
+
+	struct z_stream_s		*stream;
+};
+
+struct pci_params {
+	int		pm_cap;
+
+	unsigned long	mem_start;
+	unsigned long	mem_end;
+	unsigned int	irq;
+	u8		pf_num;
+};
+
+struct qed_int_param {
+	u32	int_mode;
+	u8	num_vectors;
+	u8	min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+	struct qed_int_param	in;
+	struct qed_int_param	out;
+	struct msix_entry	*msix_table;
+	bool			fp_initialized;
+	u8			fp_msix_base;
+	u8			fp_msix_cnt;
+};
+
+struct qed_dev {
+	u32	dp_module;
+	u8	dp_level;
+	char	name[NAME_SIZE];
+
+	u8	type;
+#define QED_DEV_TYPE_BB_A0      (0 << 0)
+#define QED_DEV_TYPE_MASK       (0x3)
+#define QED_DEV_TYPE_SHIFT      (0)
+
+	u16	chip_num;
+#define CHIP_NUM_MASK                   0xffff
+#define CHIP_NUM_SHIFT                  16
+
+	u16	chip_rev;
+#define CHIP_REV_MASK                   0xf
+#define CHIP_REV_SHIFT                  12
+
+	u16				chip_metal;
+#define CHIP_METAL_MASK                 0xff
+#define CHIP_METAL_SHIFT                4
+
+	u16				chip_bond_id;
+#define CHIP_BOND_ID_MASK               0xf
+#define CHIP_BOND_ID_SHIFT              0
+
+	u8				num_engines;
+	u8				num_ports_in_engines;
+	u8				num_funcs_in_port;
+
+	u8				path_id;
+	enum mf_mode			mf_mode;
+#define IS_MF(_p_hwfn)          (((_p_hwfn)->cdev)->mf_mode != SF)
+#define IS_MF_SI(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_NPAR)
+#define IS_MF_SD(_p_hwfn)       (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN)
+
+	int				pcie_width;
+	int				pcie_speed;
+	u8				ver_str[VER_SIZE];
+
+	/* Add MF related configuration */
+	u8				mcp_rev;
+	u8				boot_mode;
+
+	u8				wol;
+
+	u32				int_mode;
+	enum qed_coalescing_mode	int_coalescing_mode;
+	u8				rx_coalesce_usecs;
+	u8				tx_coalesce_usecs;
+
+	/* Start Bar offset of first hwfn */
+	void __iomem			*regview;
+	void __iomem			*doorbells;
+	u64				db_phys_addr;
+	unsigned long			db_size;
+
+	/* PCI */
+	u8				cache_shift;
+
+	/* Init */
+	const struct iro		*iro_arr;
+#define IRO (p_hwfn->cdev->iro_arr)
+
+	/* HW functions */
+	u8				num_hwfns;
+	struct qed_hwfn			hwfns[MAX_HWFNS_PER_DEVICE];
+
+	u32				drv_type;
+
+	struct qed_eth_stats		*reset_stats;
+	struct qed_fw_data		*fw_data;
+
+	u32				mcp_nvm_resp;
+
+	/* Linux specific here */
+	struct  qede_dev		*edev;
+	struct  pci_dev			*pdev;
+	int				msg_enable;
+
+	struct pci_params		pci_params;
+
+	struct qed_int_params		int_params;
+
+	u8				protocol;
+#define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
+
+	/* Callbacks to protocol driver */
+	union {
+		struct qed_common_cb_ops	*common;
+		struct qed_eth_cb_ops		*eth;
+	} protocol_ops;
+	void				*ops_cookie;
+
+	const struct firmware		*firmware;
+};
+
+#define QED_GET_TYPE(dev)       (((dev)->type & QED_DEV_TYPE_MASK) >> \
+				 QED_DEV_TYPE_SHIFT)
+#define QED_IS_BB_A0(dev)       (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0)
+#define QED_IS_BB(dev)  (QED_IS_BB_A0(dev))
+
+#define NUM_OF_SBS(dev)         MAX_SB_PER_PATH_BB
+#define NUM_OF_ENG_PFS(dev)     MAX_NUM_PFS_BB
+
+/**
+ * @brief qed_concrete_to_sw_fid - get the sw function id from
+ *        the concrete value.
+ *
+ * @param cdev
+ * @param concrete_fid
+ *
+ * @return u8
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+					u32 concrete_fid)
+{
+	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+
+	return pfid;
+}
+
+#define PURE_LB_TC 8
+
+#define QED_LEADING_HWFN(dev)   (&dev->hwfns[0])
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset)          (void __iomem *)((u8 __iomem *)\
+						(cdev->regview) + \
+							 (offset))
+
+#define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val)			 \
+	writel((u32)val, (void __iomem *)((u8 __iomem *)\
+					  (cdev->doorbells) + (db_addr)))
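+
+/* Example (illustrative sketch): ringing a doorbell for connection `cid'.
+ * `prod_val' is a placeholder for the value to publish, and 0 is used for
+ * the DEMS argument purely for illustration.
+ *
+ *	u32 addr = qed_db_addr(cid, 0);
+ *
+ *	DOORBELL(cdev, addr, prod_val);
+ */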
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+		      struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+		   u32 input_len, u8 *input_buf,
+		   u32 max_size, u8 *unzip_buf);
+
+#define QED_ETH_INTERFACE_VERSION       300
+
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 0000000..7ccdb46
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,847 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES		PROTOCOLID_COMMON
+#define NUM_TASK_TYPES		2
+#define NUM_TASK_PF_SEGMENTS	4
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT		4
+#define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)
+
+/* ILT constants */
+#define ILT_DEFAULT_HW_P_SIZE		3
+#define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT	0
+#define ILT_ENTRY_VALID_MASK		0x1ULL
+#define ILT_ENTRY_VALID_SHIFT		52
+#define ILT_ENTRY_IN_REGS		2
+#define ILT_REG_SIZE_IN_BYTES		4
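+
+/* Example (illustrative only): building a valid ILT entry for a 4K-aligned
+ * physical address `phys', as qed_ilt_init_pf() does below:
+ *
+ *	u64 ilt_hw_entry = 0;
+ *
+ *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, phys >> 12);
+ */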
+
+/* connection context union */
+union conn_context {
+	struct core_conn_context core_ctx;
+	struct eth_conn_context eth_ctx;
+};
+
+#define CONN_CXT_SIZE(p_hwfn) \
+	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+/* PF per-protocol configuration object */
+struct qed_conn_type_cfg {
+	u32 cid_count;
+	u32 cid_start;
+};
+
+/* ILT Client configuration, Per connection type (protocol) resources. */
+#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
+#define CDUC_BLK		(0)
+
+enum ilt_clients {
+	ILT_CLI_CDUC,
+	ILT_CLI_QM,
+	ILT_CLI_MAX
+};
+
+struct ilt_cfg_pair {
+	u32 reg;
+	u32 val;
+};
+
+struct qed_ilt_cli_blk {
+	u32 total_size; /* 0 means not active */
+	u32 real_size_in_page;
+	u32 start_line;
+};
+
+struct qed_ilt_client_cfg {
+	bool active;
+
+	/* ILT boundaries */
+	struct ilt_cfg_pair first;
+	struct ilt_cfg_pair last;
+	struct ilt_cfg_pair p_size;
+
+	/* ILT client blocks for PF */
+	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+	u32 pf_total_lines;
+};
+
+/* Per Path -
+ *      ILT shadow table
+ *      Protocol acquired CID lists
+ *      PF start line in ILT
+ */
+struct qed_dma_mem {
+	dma_addr_t p_phys;
+	void *p_virt;
+	size_t size;
+};
+
+struct qed_cid_acquired_map {
+	u32		start_cid;
+	u32		max_count;
+	unsigned long	*cid_map;
+};
+
+struct qed_cxt_mngr {
+	/* Per-protocol configuration */
+	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];
+
+	/* computed ILT structure */
+	struct qed_ilt_client_cfg	clients[ILT_CLI_MAX];
+
+	/* Acquired CIDs */
+	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];
+
+	/* ILT shadow table */
+	struct qed_dma_mem		*ilt_shadow;
+	u32				pf_start_line;
+};
+
+static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+{
+	u32 type, pf_cids = 0;
+
+	for (type = 0; type < MAX_CONN_TYPES; type++)
+		pf_cids += p_mngr->conn_cfg[type].cid_count;
+
+	return pf_cids;
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+			    struct qed_qm_iids *iids)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	int type;
+
+	for (type = 0; type < MAX_CONN_TYPES; type++)
+		iids->cids += p_mngr->conn_cfg[type].cid_count;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+					enum protocol_type type,
+					u32 cid_count)
+{
+	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+				 struct qed_ilt_cli_blk *p_blk,
+				 u32 start_line, u32 total_size,
+				 u32 elem_size)
+{
+	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+	/* verify that it's called only once for each block */
+	if (p_blk->total_size)
+		return;
+
+	p_blk->total_size = total_size;
+	p_blk->real_size_in_page = 0;
+	if (elem_size)
+		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+	p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+				 struct qed_ilt_client_cfg *p_cli,
+				 struct qed_ilt_cli_blk *p_blk,
+				 u32 *p_line, enum ilt_clients client_id)
+{
+	if (!p_blk->total_size)
+		return;
+
+	if (!p_cli->active)
+		p_cli->first.val = *p_line;
+
+	p_cli->active = true;
+	*p_line += DIV_ROUND_UP(p_blk->total_size,
+				p_blk->real_size_in_page);
+	p_cli->last.val = *p_line - 1;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+		   client_id, p_cli->first.val,
+		   p_cli->last.val, p_blk->total_size,
+		   p_blk->real_size_in_page, p_blk->start_line);
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_ilt_cli_blk *p_blk;
+	u32 curr_line, total, pf_cids;
+	struct qed_qm_iids qm_iids;
+
+	memset(&qm_iids, 0, sizeof(qm_iids));
+
+	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+	/* CDUC */
+	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+	curr_line = p_mngr->pf_start_line;
+	p_cli->pf_total_lines = 0;
+
+	/* get the counters for the CDUC and QM clients  */
+	pf_cids = qed_cxt_cdu_iids(p_mngr);
+
+	p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+	total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+			     total, CONN_CXT_SIZE(p_hwfn));
+
+	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+	p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+	/* QM */
+	p_cli = &p_mngr->clients[ILT_CLI_QM];
+	p_blk = &p_cli->pf_blks[0];
+
+	qed_cxt_qm_iids(p_hwfn, &qm_iids);
+	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
+				   p_hwfn->qm_info.num_pqs, 0);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+		   "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
+		   qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+
+	qed_ilt_cli_blk_fill(p_cli, p_blk,
+			     curr_line, total * 0x1000,
+			     QM_PQ_ELEMENT_SIZE);
+
+	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+	p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+	    RESC_NUM(p_hwfn, QED_ILT)) {
+		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define for_each_ilt_valid_client(pos, clients)	\
+		for (pos = 0; pos < ILT_CLI_MAX; pos++)
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+	u32 size = 0;
+	u32 i;
+
+	for_each_ilt_valid_client(i, ilt_clients) {
+		if (!ilt_clients[i].active)
+			continue;
+		size += (ilt_clients[i].last.val -
+			 ilt_clients[i].first.val + 1);
+	}
+
+	return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 ilt_size, i;
+
+	ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+		if (p_dma->p_virt)
+			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+					  p_dma->size, p_dma->p_virt,
+					  p_dma->p_phys);
+		p_dma->p_virt = NULL;
+	}
+	kfree(p_mngr->ilt_shadow);
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+			     struct qed_ilt_cli_blk *p_blk,
+			     enum ilt_clients ilt_client,
+			     u32 start_line_offset)
+{
+	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+	u32 lines, line, sz_left;
+
+	if (!p_blk->total_size)
+		return 0;
+
+	sz_left = p_blk->total_size;
+	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+	line = p_blk->start_line + start_line_offset -
+	       p_hwfn->p_cxt_mngr->pf_start_line;
+
+	for (; lines; lines--) {
+		dma_addr_t p_phys;
+		void *p_virt;
+		u32 size;
+
+		size = min_t(u32, sz_left,
+			     p_blk->real_size_in_page);
+		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+					    size,
+					    &p_phys,
+					    GFP_KERNEL);
+		if (!p_virt)
+			return -ENOMEM;
+		memset(p_virt, 0, size);
+
+		ilt_shadow[line].p_phys = p_phys;
+		ilt_shadow[line].p_virt = p_virt;
+		ilt_shadow[line].size = size;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+			    line, (u64)p_phys, p_virt, size);
+
+		sz_left -= size;
+		line++;
+	}
+
+	return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_ilt_client_cfg *clients = p_mngr->clients;
+	struct qed_ilt_cli_blk *p_blk;
+	u32 size, i, j;
+	int rc;
+
+	size = qed_cxt_ilt_shadow_size(clients);
+	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
+				     GFP_KERNEL);
+	if (!p_mngr->ilt_shadow) {
+		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
+		rc = -ENOMEM;
+		goto ilt_shadow_fail;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+		   "Allocated 0x%x bytes for ilt shadow\n",
+		   (u32)(size * sizeof(struct qed_dma_mem)));
+
+	for_each_ilt_valid_client(i, clients) {
+		if (!clients[i].active)
+			continue;
+		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+			p_blk = &clients[i].pf_blks[j];
+			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+			if (rc != 0)
+				goto ilt_shadow_fail;
+		}
+	}
+
+	return 0;
+
+ilt_shadow_fail:
+	qed_ilt_shadow_free(p_hwfn);
+	return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 type;
+
+	for (type = 0; type < MAX_CONN_TYPES; type++) {
+		kfree(p_mngr->acquired[type].cid_map);
+		p_mngr->acquired[type].max_count = 0;
+		p_mngr->acquired[type].start_cid = 0;
+	}
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 start_cid = 0;
+	u32 type;
+
+	for (type = 0; type < MAX_CONN_TYPES; type++) {
+		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+		u32 size;
+
+		if (cid_cnt == 0)
+			continue;
+
+		size = DIV_ROUND_UP(cid_cnt,
+				    sizeof(unsigned long) * BITS_PER_BYTE) *
+		       sizeof(unsigned long);
+		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
+		if (!p_mngr->acquired[type].cid_map)
+			goto cid_map_fail;
+
+		p_mngr->acquired[type].max_count = cid_cnt;
+		p_mngr->acquired[type].start_cid = start_cid;
+
+		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+			   "Type %08x start: %08x count %08x\n",
+			   type, p_mngr->acquired[type].start_cid,
+			   p_mngr->acquired[type].max_count);
+		start_cid += cid_cnt;
+	}
+
+	return 0;
+
+cid_map_fail:
+	qed_cid_map_free(p_hwfn);
+	return -ENOMEM;
+}
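+
+/* Sizing example for the bitmap allocation above (illustrative only): on a
+ * 64-bit build, cid_cnt = 80 gives DIV_ROUND_UP(80, 64) = 2 unsigned longs,
+ * i.e. a 16 byte cid_map covering CIDs [start_cid, start_cid + 79].
+ */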
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr;
+	u32 i;
+
+	p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
+	if (!p_mngr) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
+		return -ENOMEM;
+	}
+
+	/* Initialize ILT client registers */
+	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+	/* default ILT page size for all clients is 32K */
+	for (i = 0; i < ILT_CLI_MAX; i++)
+		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+	/* Set the cxt manager pointer prior to further allocations */
+	p_hwfn->p_cxt_mngr = p_mngr;
+
+	return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+	int rc;
+
+	/* Allocate the ILT shadow table */
+	rc = qed_ilt_shadow_alloc(p_hwfn);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
+		goto tables_alloc_fail;
+	}
+
+	/* Allocate and initialize the acquired cids bitmaps */
+	rc = qed_cid_map_alloc(p_hwfn);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
+		goto tables_alloc_fail;
+	}
+
+	return 0;
+
+tables_alloc_fail:
+	qed_cxt_mngr_free(p_hwfn);
+	return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn->p_cxt_mngr)
+		return;
+
+	qed_cid_map_free(p_hwfn);
+	qed_ilt_shadow_free(p_hwfn);
+	kfree(p_hwfn->p_cxt_mngr);
+
+	p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	int type;
+
+	/* Reset acquired cids */
+	for (type = 0; type < MAX_CONN_TYPES; type++) {
+		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+
+		if (cid_cnt == 0)
+			continue;
+
+		memset(p_mngr->acquired[type].cid_map, 0,
+		       DIV_ROUND_UP(cid_cnt,
+				    sizeof(unsigned long) * BITS_PER_BYTE) *
+		       sizeof(unsigned long));
+	}
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT	\
+	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+	/* CDUC - connection configuration */
+	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+	cxt_size = CONN_CXT_SIZE(p_hwfn);
+	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_qm_pf_rt_init_params params;
+	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct qed_qm_iids iids;
+
+	memset(&iids, 0, sizeof(iids));
+	qed_cxt_qm_iids(p_hwfn, &iids);
+
+	memset(&params, 0, sizeof(params));
+	params.port_id = p_hwfn->port_id;
+	params.pf_id = p_hwfn->rel_pf_id;
+	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+	params.is_first_pf = p_hwfn->first_on_engine;
+	params.num_pf_cids = iids.cids;
+	params.start_pq = qm_info->start_pq;
+	params.num_pf_pqs = qm_info->num_pqs;
+	params.start_vport = qm_info->num_vports;
+	params.pf_wfq = qm_info->pf_wfq;
+	params.pf_rl = qm_info->pf_rl;
+	params.pq_params = qm_info->qm_pq_params;
+	params.vport_params = qm_info->qm_vport_params;
+
+	qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+}
+
+/* CM PF */
+static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+	union qed_qm_pq_params pq_params;
+	u16 pq;
+
+	/* XCM pure-LB queue */
+	memset(&pq_params, 0, sizeof(pq_params));
+	pq_params.core.tc = LB_TC;
+	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
+
+	return 0;
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 dq_pf_max_cid = 0;
+
+	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+	/* 5 - PF */
+	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *ilt_clients;
+	int i;
+
+	ilt_clients = p_hwfn->p_cxt_mngr->clients;
+	for_each_ilt_valid_client(i, ilt_clients) {
+		if (!ilt_clients[i].active)
+			continue;
+		STORE_RT_REG(p_hwfn,
+			     ilt_clients[i].first.reg,
+			     ilt_clients[i].first.val);
+		STORE_RT_REG(p_hwfn,
+			     ilt_clients[i].last.reg,
+			     ilt_clients[i].last.val);
+		STORE_RT_REG(p_hwfn,
+			     ilt_clients[i].p_size.reg,
+			     ilt_clients[i].p_size.val);
+	}
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *clients;
+	struct qed_cxt_mngr *p_mngr;
+	struct qed_dma_mem *p_shdw;
+	u32 line, rt_offst, i;
+
+	qed_ilt_bounds_init(p_hwfn);
+
+	p_mngr = p_hwfn->p_cxt_mngr;
+	p_shdw = p_mngr->ilt_shadow;
+	clients = p_hwfn->p_cxt_mngr->clients;
+
+	for_each_ilt_valid_client(i, clients) {
+		if (!clients[i].active)
+			continue;
+
+		/* Client's first val and the RT array are absolute, while
+		 * ILT shadow lines are relative.
+		 */
+		line = clients[i].first.val - p_mngr->pf_start_line;
+		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+			   clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
+			u64 ilt_hw_entry = 0;
+
+			/* p_virt could be NULL in case of dynamic
+			 * allocation
+			 */
+			if (p_shdw[line].p_virt) {
+				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+					  (p_shdw[line].p_phys >> 12));
+
+				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+					   rt_offst, line, i,
+					   (u64)(p_shdw[line].p_phys >> 12));
+			}
+
+			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+		}
+	}
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+	qed_cdu_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+{
+	qed_qm_init_pf(p_hwfn);
+	qed_cm_init_pf(p_hwfn);
+	qed_dq_init_pf(p_hwfn);
+	qed_ilt_init_pf(p_hwfn);
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type,
+			u32 *p_cid)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 rel_cid;
+
+	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+		return -EINVAL;
+	}
+
+	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
+				      p_mngr->acquired[type].max_count);
+
+	if (rel_cid >= p_mngr->acquired[type].max_count) {
+		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
+			  type);
+		return -EINVAL;
+	}
+
+	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+
+	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+
+	return 0;
+}
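+
+/* Typical usage (illustrative sketch, details elided): acquire a CID for a
+ * new L2 connection and release it again on teardown.
+ *
+ *	u32 cid;
+ *
+ *	if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
+ *		... use cid ...
+ *		qed_cxt_release_cid(p_hwfn, cid);
+ *	}
+ */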
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+				      u32 cid,
+				      enum protocol_type *p_type)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map;
+	enum protocol_type p;
+	u32 rel_cid;
+
+	/* Iterate over protocols and find matching cid range */
+	for (p = 0; p < MAX_CONN_TYPES; p++) {
+		p_map = &p_mngr->acquired[p];
+
+		if (!p_map->cid_map)
+			continue;
+		if (cid >= p_map->start_cid &&
+		    cid < p_map->start_cid + p_map->max_count)
+			break;
+	}
+	*p_type = p;
+
+	if (p == MAX_CONN_TYPES) {
+		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
+		return false;
+	}
+
+	rel_cid = cid - p_map->start_cid;
+	if (!test_bit(rel_cid, p_map->cid_map)) {
+		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
+		return false;
+	}
+	return true;
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+			 u32 cid)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	enum protocol_type type;
+	bool b_acquired;
+	u32 rel_cid;
+
+	/* Test acquired and find matching per-protocol map */
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+
+	if (!b_acquired)
+		return;
+
+	rel_cid = cid - p_mngr->acquired[type].start_cid;
+	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+			 struct qed_cxt_info *p_info)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+	enum protocol_type type;
+	bool b_acquired;
+
+	/* Test acquired and find matching per-protocol map */
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+	if (!b_acquired)
+		return -EINVAL;
+
+	/* set the protocol type */
+	p_info->type = type;
+
+	/* compute context virtual pointer */
+	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+	line = p_info->iid / cxts_per_p;
+
+	/* Make sure context is allocated (dynamic allocation) */
+	if (!p_mngr->ilt_shadow[line].p_virt)
+		return -EINVAL;
+
+	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
+			p_info->iid % cxts_per_p * conn_cxt_size;
+
+	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+	return 0;
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
+	struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+
+	/* Set the number of required CORE connections */
+	u32 core_cids = 1; /* SPQ */
+
+	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+
+	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+				    p_params->num_cons);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 0000000..c8e1f5e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+	void			*p_cxt;
+	u32			iid;
+	enum protocol_type	type;
+};
+
+/**
+ * @brief qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type,
+			u32 *p_cid);
+
+/**
+ * @brief qed_cxt_get_cid_info - Returns the context info for a specific cid
+ *
+ * @param p_hwfn
+ * @param p_info in/out
+ *
+ * @return int
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+			 struct qed_cxt_info *p_info);
+
+enum qed_cxt_elem_type {
+	QED_ELEM_CXT,
+	QED_ELEM_TASK
+};
+
+/**
+ * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_cxt_release_cid - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
+			 u32 cid);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 0000000..b9b7b7e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,1797 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/* API common to all protocols */
+void qed_init_dp(struct qed_dev *cdev,
+		 u32 dp_module, u8 dp_level)
+{
+	u32 i;
+
+	cdev->dp_level = dp_level;
+	cdev->dp_module = dp_module;
+	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		p_hwfn->dp_level = dp_level;
+		p_hwfn->dp_module = dp_module;
+	}
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+	u8 i;
+
+	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		p_hwfn->cdev = cdev;
+		p_hwfn->my_id = i;
+		p_hwfn->b_active = false;
+
+		mutex_init(&p_hwfn->dmae_info.mutex);
+	}
+
+	/* hwfn 0 is always active */
+	cdev->hwfns[0].b_active = true;
+
+	/* set the default cache alignment to 128 */
+	cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+	kfree(qm_info->qm_pq_params);
+	qm_info->qm_pq_params = NULL;
+	kfree(qm_info->qm_vport_params);
+	qm_info->qm_vport_params = NULL;
+	kfree(qm_info->qm_port_params);
+	qm_info->qm_port_params = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+	int i;
+
+	kfree(cdev->fw_data);
+	cdev->fw_data = NULL;
+
+	kfree(cdev->reset_stats);
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		kfree(p_hwfn->p_tx_cids);
+		p_hwfn->p_tx_cids = NULL;
+		kfree(p_hwfn->p_rx_cids);
+		p_hwfn->p_rx_cids = NULL;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		qed_cxt_mngr_free(p_hwfn);
+		qed_qm_info_free(p_hwfn);
+		qed_spq_free(p_hwfn);
+		qed_eq_free(p_hwfn, p_hwfn->p_eq);
+		qed_consq_free(p_hwfn, p_hwfn->p_consq);
+		qed_int_free(p_hwfn);
+		qed_dmae_info_free(p_hwfn);
+	}
+}
+
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct init_qm_port_params *p_qm_port;
+	u8 num_vports, i, vport_id, num_ports;
+	u16 num_pqs, multi_cos_tcs = 1;
+
+	memset(qm_info, 0, sizeof(*qm_info));
+
+	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+
+	/* Sanity checking that setup requires legal number of resources */
+	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
+		DP_ERR(p_hwfn,
+		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
+		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
+		return -EINVAL;
+	}
+
+	/* PQs will be arranged as follows: first the per-TC PQs, then the
+	 * pure-LB queue.
+	 */
+	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+					num_pqs, GFP_ATOMIC);
+	if (!qm_info->qm_pq_params)
+		goto alloc_err;
+
+	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+					   num_vports, GFP_ATOMIC);
+	if (!qm_info->qm_vport_params)
+		goto alloc_err;
+
+	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+					  MAX_NUM_PORTS, GFP_ATOMIC);
+	if (!qm_info->qm_port_params)
+		goto alloc_err;
+
+	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+	/* First init per-TC PQs */
+	for (i = 0; i < multi_cos_tcs; i++) {
+		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+		params->vport_id = vport_id;
+		params->tc_id = p_hwfn->hw_info.non_offload_tc;
+		params->wrr_group = 1;
+	}
+
+	/* Then init pure-LB PQ */
+	qm_info->pure_lb_pq = i;
+	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+	qm_info->qm_pq_params[i].wrr_group = 1;
+	i++;
+
+	qm_info->offload_pq = 0;
+	qm_info->num_pqs = num_pqs;
+	qm_info->num_vports = num_vports;
+
+	/* Initialize qm port parameters */
+	num_ports = p_hwfn->cdev->num_ports_in_engines;
+	for (i = 0; i < num_ports; i++) {
+		p_qm_port = &qm_info->qm_port_params[i];
+		p_qm_port->active = 1;
+		p_qm_port->num_active_phys_tcs = 4;
+		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+	}
+
+	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+
+	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+	qm_info->pf_wfq = 0;
+	qm_info->pf_rl = 0;
+	qm_info->vport_rl_en = 1;
+
+	return 0;
+
+alloc_err:
+	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+	kfree(qm_info->qm_pq_params);
+	kfree(qm_info->qm_vport_params);
+	kfree(qm_info->qm_port_params);
+
+	return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+	struct qed_consq *p_consq;
+	struct qed_eq *p_eq;
+	int i, rc = 0;
+
+	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+	if (!cdev->fw_data)
+		return -ENOMEM;
+
+	/* Allocate Memory for the Queue->CID mapping */
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		int tx_size = sizeof(struct qed_hw_cid_data) *
+				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
+		int rx_size = sizeof(struct qed_hw_cid_data) *
+				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
+		if (!p_hwfn->p_tx_cids) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate memory for Tx Cids\n");
+			goto alloc_err;
+		}
+
+		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
+		if (!p_hwfn->p_rx_cids) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate memory for Rx Cids\n");
+			goto alloc_err;
+		}
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		/* First allocate the context manager structure */
+		rc = qed_cxt_mngr_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* Set the HW cid/tid numbers (in the context manager).
+		 * Must be done prior to any further computations.
+		 */
+		rc = qed_cxt_set_pf_params(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* Prepare and process QM requirements */
+		rc = qed_init_qm_info(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* Compute the ILT client partition */
+		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* CID map / ILT shadow table / T2
+		 * The table sizes are determined by the computations above.
+		 */
+		rc = qed_cxt_tables_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* SPQ, must follow ILT because it initializes the SPQ context */
+		rc = qed_spq_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* SP status block allocation */
+		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+							 RESERVED_PTT_DPC);
+
+		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+		if (rc)
+			goto alloc_err;
+
+		/* EQ */
+		p_eq = qed_eq_alloc(p_hwfn, 256);
+
+		if (!p_eq)
+			goto alloc_err;
+		p_hwfn->p_eq = p_eq;
+
+		p_consq = qed_consq_alloc(p_hwfn);
+		if (!p_consq)
+			goto alloc_err;
+		p_hwfn->p_consq = p_consq;
+
+		/* DMA info initialization */
+		rc = qed_dmae_info_alloc(p_hwfn);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to allocate memory for dmae_info structure\n");
+			goto alloc_err;
+		}
+	}
+
+	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+	if (!cdev->reset_stats) {
+		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
+		goto alloc_err;
+	}
+
+	return 0;
+
+alloc_err:
+	qed_resc_free(cdev);
+	return rc;
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		qed_cxt_mngr_setup(p_hwfn);
+		qed_spq_setup(p_hwfn);
+		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
+		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+		/* Read shadow of current MFW mailbox */
+		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+		       p_hwfn->mcp_info->mfw_mb_cur,
+		       p_hwfn->mcp_info->mfw_mb_length);
+
+		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+	}
+}
+
+#define FINAL_CLEANUP_CMD_OFFSET        (0)
+#define FINAL_CLEANUP_CMD (0x1)
+#define FINAL_CLEANUP_VALID_OFFSET      (6)
+#define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
+#define FINAL_CLEANUP_COMP (0x2)
+#define FINAL_CLEANUP_POLL_CNT          (100)
+#define FINAL_CLEANUP_POLL_TIME         (10)
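+
+/* Illustrative composition of the cleanup command word built below,
+ * e.g. for VF/PF id 3:
+ *	(0x1 << 0) | (1 << 6) | (3 << 7) |
+ *	(0x2 << SDM_OP_GEN_COMP_TYPE_SHIFT)
+ */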
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u16 id)
+{
+	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+	int rc = -EBUSY;
+
+	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
+
+	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
+	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
+	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
+	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+	/* Make sure notification is not set before initiating final cleanup */
+	if (REG_RD(p_hwfn, addr)) {
+		DP_NOTICE(p_hwfn,
+			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+		REG_WR(p_hwfn, addr, 0);
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+		   id, command);
+
+	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+	/* Poll until completion */
+	while (!REG_RD(p_hwfn, addr) && count--)
+		msleep(FINAL_CLEANUP_POLL_TIME);
+
+	if (REG_RD(p_hwfn, addr))
+		rc = 0;
+	else
+		DP_NOTICE(p_hwfn,
+			  "Failed to receive FW final cleanup notification\n");
+
+	/* Cleanup afterwards */
+	REG_WR(p_hwfn, addr, 0);
+
+	return rc;
+}
+
+static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+	int hw_mode = 0;
+
+	hw_mode = (1 << MODE_BB_A0);
+
+	switch (p_hwfn->cdev->num_ports_in_engines) {
+	case 1:
+		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+		break;
+	case 2:
+		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+		break;
+	case 4:
+		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+			  p_hwfn->cdev->num_ports_in_engines);
+		return;
+	}
+
+	switch (p_hwfn->cdev->mf_mode) {
+	case SF:
+		hw_mode |= 1 << MODE_SF;
+		break;
+	case MF_OVLAN:
+		hw_mode |= 1 << MODE_MF_SD;
+		break;
+	case MF_NPAR:
+		hw_mode |= 1 << MODE_MF_SI;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
+		hw_mode |= 1 << MODE_SF;
+	}
+
+	hw_mode |= 1 << MODE_ASIC;
+
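+	/* E.g. a single-function 2-port ASIC ends up with
+	 * (1 << MODE_BB_A0) | (1 << MODE_PORTS_PER_ENG_2) |
+	 * (1 << MODE_SF) | (1 << MODE_ASIC).
+	 */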
+	p_hwfn->hw_info.hw_mode = hw_mode;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+	int i, sb_id;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_igu_info *p_igu_info;
+		struct qed_igu_block *p_block;
+		struct cau_sb_entry sb_entry;
+
+		p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
+		     sb_id++) {
+			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+			if (!p_block->is_pf)
+				continue;
+
+			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+					      p_block->function_id,
+					      0, 0);
+			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
+					 sb_entry);
+		}
+	}
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      int hw_mode)
+{
+	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+	struct qed_qm_common_rt_init_params params;
+	struct qed_dev *cdev = p_hwfn->cdev;
+	int rc = 0;
+
+	qed_init_cau_rt_data(cdev);
+
+	/* Program GTT windows */
+	qed_gtt_init(p_hwfn);
+
+	if (p_hwfn->mcp_info) {
+		if (p_hwfn->mcp_info->func_info.bandwidth_max)
+			qm_info->pf_rl_en = 1;
+		if (p_hwfn->mcp_info->func_info.bandwidth_min)
+			qm_info->pf_wfq_en = 1;
+	}
+
+	memset(&params, 0, sizeof(params));
+	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+	params.pf_rl_en = qm_info->pf_rl_en;
+	params.pf_wfq_en = qm_info->pf_wfq_en;
+	params.vport_rl_en = qm_info->vport_rl_en;
+	params.vport_wfq_en = qm_info->vport_wfq_en;
+	params.port_params = qm_info->qm_port_params;
+
+	qed_qm_common_rt_init(p_hwfn, &params);
+
+	qed_cxt_hw_init_common(p_hwfn);
+
+	/* Close gate from NIG to BRB/Storm; By default they are open, but
+	 * we close them to prevent NIG from passing data to reset blocks.
+	 * Should have been done in the ENGINE phase, but init-tool lacks
+	 * proper port-pretend capabilities.
+	 */
+	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+	qed_port_unpretend(p_hwfn, p_ptt);
+
+	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+	if (rc != 0)
+		return rc;
+
+	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+	/* Disable relaxed ordering in the PCI config space */
+	qed_wr(p_hwfn, p_ptt, 0x20b4,
+	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+
+	return rc;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    int hw_mode)
+{
+	int rc = 0;
+
+	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+			  hw_mode);
+	return rc;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  int hw_mode,
+			  bool b_hw_start,
+			  enum qed_int_mode int_mode,
+			  bool allow_npar_tx_switch)
+{
+	u8 rel_pf_id = p_hwfn->rel_pf_id;
+	int rc = 0;
+
+	if (p_hwfn->mcp_info) {
+		struct qed_mcp_function_info *p_info;
+
+		p_info = &p_hwfn->mcp_info->func_info;
+		if (p_info->bandwidth_min)
+			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+		/* Update the rate limit once we actually have a link */
+		p_hwfn->qm_info.pf_rl = 100;
+	}
+
+	qed_cxt_hw_init_pf(p_hwfn);
+
+	qed_int_igu_init_rt(p_hwfn);
+
+	/* Set VLAN in NIG if needed */
+	if (hw_mode & (1 << MODE_MF_SD)) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+			     p_hwfn->hw_info.ovlan);
+	}
+
+	/* Enable classification by MAC if needed */
+	if (hw_mode & (1 << MODE_MF_SI)) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+			   "Configuring TAGMAC_CLS_TYPE\n");
+		STORE_RT_REG(p_hwfn,
+			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+	}
+
+	/* Protocol configuration */
+	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
+	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+	/* Clean the chip of any remains of a previous driver, if such exist */
+	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+	if (rc != 0)
+		return rc;
+
+	/* PF Init sequence */
+	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+	if (rc)
+		return rc;
+
+	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+	if (rc)
+		return rc;
+
+	/* Pure runtime initializations - directly to the HW  */
+	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+	if (b_hw_start) {
+		/* enable interrupts */
+		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+		/* send function start command */
+		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+		if (rc)
+			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+	}
+	return rc;
+}
+
+static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt,
+			       u8 enable)
+{
+	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+
+	/* Change PF in PXP */
+	qed_wr(p_hwfn, p_ptt,
+	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+	/* wait until value is set - try for 1 second every 50us */
+	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+		val = qed_rd(p_hwfn, p_ptt,
+			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+		if (val == set_val)
+			break;
+
+		usleep_range(50, 60);
+	}
+
+	if (val != set_val) {
+		DP_NOTICE(p_hwfn,
+			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_main_ptt)
+{
+	/* Read shadow of current MFW mailbox */
+	qed_mcp_read_mb(p_hwfn, p_main_ptt);
+	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+	       p_hwfn->mcp_info->mfw_mb_cur,
+	       p_hwfn->mcp_info->mfw_mb_length);
+}
+
+int qed_hw_init(struct qed_dev *cdev,
+		bool b_hw_start,
+		enum qed_int_mode int_mode,
+		bool allow_npar_tx_switch,
+		const u8 *bin_fw_data)
+{
+	struct qed_storm_stats *p_stat;
+	u32 load_code, param, *p_address;
+	int rc, mfw_rc, i;
+	u8 fw_vport = 0;
+
+	rc = qed_init_fw_data(cdev, bin_fw_data);
+	if (rc != 0)
+		return rc;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		rc = qed_fw_vport(p_hwfn, 0, &fw_vport);
+		if (rc != 0)
+			return rc;
+
+		/* Enable DMAE in PXP */
+		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+		if (rc)
+			return rc;
+
+		qed_calc_hw_mode(p_hwfn);
+
+		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+				      &load_code);
+		if (rc) {
+			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+			return rc;
+		}
+
+		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
+			   rc, load_code);
+
+		p_hwfn->first_on_engine = (load_code ==
+					   FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+		switch (load_code) {
+		case FW_MSG_CODE_DRV_LOAD_ENGINE:
+			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+						p_hwfn->hw_info.hw_mode);
+			if (rc)
+				break;
+		/* Fall through */
+		case FW_MSG_CODE_DRV_LOAD_PORT:
+			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+					      p_hwfn->hw_info.hw_mode);
+			if (rc)
+				break;
+
+		/* Fall through */
+		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+					    p_hwfn->hw_info.hw_mode,
+					    b_hw_start, int_mode,
+					    allow_npar_tx_switch);
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+
+		if (rc)
+			DP_NOTICE(p_hwfn,
+				  "init phase failed for loadcode 0x%x (rc %d)\n",
+				   load_code, rc);
+
+		/* ACK mfw regardless of success or failure of initialization */
+		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				     DRV_MSG_CODE_LOAD_DONE,
+				     0, &load_code, &param);
+		if (rc)
+			return rc;
+		if (mfw_rc) {
+			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
+			return mfw_rc;
+		}
+
+		p_hwfn->hw_init_done = true;
+
+		/* init PF stats */
+		p_stat = &p_hwfn->storm_stats;
+		p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM +
+					 MSTORM_QUEUE_STAT_OFFSET(fw_vport);
+		p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+
+		p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM +
+					 USTORM_QUEUE_STAT_OFFSET(fw_vport);
+		p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+
+		p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM +
+					 PSTORM_QUEUE_STAT_OFFSET(fw_vport);
+		p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+
+		p_address = &p_stat->tstats.address;
+		*p_address = BAR0_MAP_REG_TSDM_RAM +
+			     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+		p_stat->tstats.len = sizeof(struct tstorm_per_port_stat);
+	}
+
+	return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+int qed_hw_stop(struct qed_dev *cdev)
+{
+	int rc = 0, t_rc;
+	int i, j;
+
+	for_each_hwfn(cdev, j) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+		/* mark the hw as uninitialized... */
+		p_hwfn->hw_init_done = false;
+
+		rc = qed_sp_pf_stop(p_hwfn);
+		if (rc)
+			return rc;
+
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+			if ((!qed_rd(p_hwfn, p_ptt,
+				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+			    (!qed_rd(p_hwfn, p_ptt,
+				     TM_REG_PF_SCAN_ACTIVE_TASK)))
+				break;
+
+			usleep_range(1000, 2000);
+		}
+		if (i == QED_HW_STOP_RETRY_LIMIT)
+			DP_NOTICE(p_hwfn,
+				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+				  (u8)qed_rd(p_hwfn, p_ptt,
+					     TM_REG_PF_SCAN_ACTIVE_CONN),
+				  (u8)qed_rd(p_hwfn, p_ptt,
+					     TM_REG_PF_SCAN_ACTIVE_TASK));
+
+		/* Disable Attention Generation */
+		qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+		/* Need to wait 1ms to guarantee SBs are cleared */
+		usleep_range(1000, 2000);
+	}
+
+	/* Disable DMAE in PXP - in CMT, this should only be done for
+	 * first hw-function, and only after all transactions have
+	 * stopped for all active hw-functions.
+	 */
+	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+				   cdev->hwfns[0].p_main_ptt,
+				   false);
+	if (t_rc != 0)
+		rc = t_rc;
+
+	return rc;
+}
+
+void qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+	int i, j;
+
+	for_each_hwfn(cdev, j) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;
+
+		DP_VERBOSE(p_hwfn,
+			   NETIF_MSG_IFDOWN,
+			   "Shutting down the fastpath\n");
+
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+			if ((!qed_rd(p_hwfn, p_ptt,
+				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+			    (!qed_rd(p_hwfn, p_ptt,
+				     TM_REG_PF_SCAN_ACTIVE_TASK)))
+				break;
+
+			usleep_range(1000, 2000);
+		}
+		if (i == QED_HW_STOP_RETRY_LIMIT)
+			DP_NOTICE(p_hwfn,
+				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+				  (u8)qed_rd(p_hwfn, p_ptt,
+					     TM_REG_PF_SCAN_ACTIVE_CONN),
+				  (u8)qed_rd(p_hwfn, p_ptt,
+					     TM_REG_PF_SCAN_ACTIVE_TASK));
+
+		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+		/* Need to wait 1ms to guarantee SBs are cleared */
+		usleep_range(1000, 2000);
+	}
+}
+
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+	/* Re-open incoming traffic */
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+static int qed_reg_assert(struct qed_hwfn *hwfn,
+			  struct qed_ptt *ptt, u32 reg,
+			  bool expected)
+{
+	u32 assert_val = qed_rd(hwfn, ptt, reg);
+
+	if (assert_val != expected) {
+		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
+			  reg, expected);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int qed_hw_reset(struct qed_dev *cdev)
+{
+	int rc = 0;
+	u32 unload_resp, unload_param;
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
+
+		/* Check for incorrect states */
+		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+			       QM_REG_USG_CNT_PF_TX, 0);
+		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+			       QM_REG_USG_CNT_PF_OTHER, 0);
+
+		/* Disable PF in HW blocks */
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       TCFC_REG_STRONG_ENABLE_PF, 0);
+		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+		       CCFC_REG_STRONG_ENABLE_PF, 0);
+
+		/* Send unload command to MCP */
+		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				 DRV_MSG_CODE_UNLOAD_REQ,
+				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
+				 &unload_resp, &unload_param);
+		if (rc) {
+			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
+			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+		}
+
+		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+				 DRV_MSG_CODE_UNLOAD_DONE,
+				 0, &unload_resp, &unload_param);
+		if (rc) {
+			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+	qed_ptt_pool_free(p_hwfn);
+	kfree(p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+	int rc;
+
+	/* Allocate PTT pool */
+	rc = qed_ptt_pool_alloc(p_hwfn);
+	if (rc)
+		return rc;
+
+	/* Allocate the main PTT */
+	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+	/* clear indirect access */
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+	/* Clean Previous errors if such exist */
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+	       1 << p_hwfn->abs_pf_id);
+
+	/* enable internal target-read */
+	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	return 0;
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+	/* ME Register */
+	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
+	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+				      PXP_CONCRETE_FID_PFID);
+	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+				    PXP_CONCRETE_FID_PORT);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+	u32 *feat_num = p_hwfn->hw_info.feat_num;
+	int num_features = 1;
+
+	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
+						num_features,
+					RESC_NUM(p_hwfn, QED_L2_QUEUE));
+	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
+		   num_features);
+}
+
+static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+{
+	u32 *resc_start = p_hwfn->hw_info.resc_start;
+	u32 *resc_num = p_hwfn->hw_info.resc_num;
+	int num_funcs, i;
+
+	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
+				  : p_hwfn->cdev->num_ports_in_engines;
+
+	resc_num[QED_SB] = min_t(u32,
+				 (MAX_SB_PER_PATH_BB / num_funcs),
+				 qed_int_get_num_sbs(p_hwfn, NULL));
+	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
+	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
+	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
+	resc_num[QED_RL] = 8;
+	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
+			     num_funcs;
+	resc_num[QED_ILT] = 950;
+
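+	/* Each PF gets a contiguous slice of every resource; e.g. if
+	 * resc_num[QED_PQ] were 64, PF 2 would start at PQ 128.
+	 */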
+	for (i = 0; i < QED_MAX_RESC; i++)
+		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+	qed_hw_set_feat(p_hwfn);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+		   "The numbers for each resource are:\n"
+		   "SB = %d start = %d\n"
+		   "L2_QUEUE = %d start = %d\n"
+		   "VPORT = %d start = %d\n"
+		   "PQ = %d start = %d\n"
+		   "RL = %d start = %d\n"
+		   "MAC = %d start = %d\n"
+		   "VLAN = %d start = %d\n"
+		   "ILT = %d start = %d\n",
+		   p_hwfn->hw_info.resc_num[QED_SB],
+		   p_hwfn->hw_info.resc_start[QED_SB],
+		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
+		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
+		   p_hwfn->hw_info.resc_num[QED_VPORT],
+		   p_hwfn->hw_info.resc_start[QED_VPORT],
+		   p_hwfn->hw_info.resc_num[QED_PQ],
+		   p_hwfn->hw_info.resc_start[QED_PQ],
+		   p_hwfn->hw_info.resc_num[QED_RL],
+		   p_hwfn->hw_info.resc_start[QED_RL],
+		   p_hwfn->hw_info.resc_num[QED_MAC],
+		   p_hwfn->hw_info.resc_start[QED_MAC],
+		   p_hwfn->hw_info.resc_num[QED_VLAN],
+		   p_hwfn->hw_info.resc_start[QED_VLAN],
+		   p_hwfn->hw_info.resc_num[QED_ILT],
+		   p_hwfn->hw_info.resc_start[QED_ILT]);
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt)
+{
+	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+	u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
+	struct qed_mcp_link_params *link;
+
+	/* Read global nvm_cfg address */
+	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+	/* Verify MCP has initialized it */
+	if (!nvm_cfg_addr) {
+		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+		return -EINVAL;
+	}
+
+	/* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
+	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+	/* Read Vendor Id / Device Id */
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, glob) +
+	       offsetof(struct nvm_cfg1_glob, pci_id);
+	p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
+				    NVM_CFG1_GLOB_VENDOR_ID_MASK;
+
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, glob) +
+	       offsetof(struct nvm_cfg1_glob, core_cfg);
+
+	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
+		break;
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
+			  core_cfg);
+		break;
+	}
+
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
+	       offsetof(struct nvm_cfg1_func, device_id);
+	val = qed_rd(p_hwfn, p_ptt, addr);
+
+	if (IS_MF(p_hwfn)) {
+		p_hwfn->hw_info.device_id =
+			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
+			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
+	} else {
+		p_hwfn->hw_info.device_id =
+			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
+			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
+	}
+
+	/* Read default link configuration */
+	link = &p_hwfn->mcp_info->link_input;
+	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+	link_temp = qed_rd(p_hwfn, p_ptt,
+			   port_cfg_addr +
+			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
+	link->speed.advertised_speeds =
+		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+
+	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
+						link->speed.advertised_speeds;
+
+	link_temp = qed_rd(p_hwfn, p_ptt,
+			   port_cfg_addr +
+			   offsetof(struct nvm_cfg1_port, link_settings));
+	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+		link->speed.autoneg = true;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+		link->speed.forced_speed = 1000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+		link->speed.forced_speed = 10000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+		link->speed.forced_speed = 25000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+		link->speed.forced_speed = 40000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+		link->speed.forced_speed = 50000;
+		break;
+	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+		link->speed.forced_speed = 100000;
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
+			  link_temp);
+	}
+
+	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+	link->pause.autoneg = !!(link_temp &
+				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+	link->pause.forced_rx = !!(link_temp &
+				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+	link->pause.forced_tx = !!(link_temp &
+				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+	link->loopback_mode = 0;
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+		   link->speed.forced_speed, link->speed.advertised_speeds,
+		   link->speed.autoneg, link->pause.autoneg);
+
+	/* Read Multi-function information from shmem */
+	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+	       offsetof(struct nvm_cfg1, glob) +
+	       offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+	switch (mf_mode) {
+	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+		p_hwfn->cdev->mf_mode = MF_OVLAN;
+		break;
+	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+		p_hwfn->cdev->mf_mode = MF_NPAR;
+		break;
+	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
+		p_hwfn->cdev->mf_mode = SF;
+		break;
+	}
+	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+		p_hwfn->cdev->mf_mode);
+
+	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+		struct qed_ptt *p_ptt,
+		enum qed_pci_personality personality)
+{
+	u32 port_mode;
+	int rc;
+
+	/* Read the port mode */
+	port_mode = qed_rd(p_hwfn, p_ptt,
+			   CNIG_REG_NW_PORT_MODE_BB_B0);
+
+	if (port_mode < 3) {
+		p_hwfn->cdev->num_ports_in_engines = 1;
+	} else if (port_mode <= 5) {
+		p_hwfn->cdev->num_ports_in_engines = 2;
+	} else {
+		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
+			  port_mode);
+
+		/* Default num_ports_in_engines to something sane */
+		p_hwfn->cdev->num_ports_in_engines = 1;
+	}
+
+	rc = qed_hw_get_nvm_info(p_hwfn, p_ptt);
+	if (rc)
+		return rc;
+
+	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+	if (rc)
+		return rc;
+
+	if (qed_mcp_is_init(p_hwfn))
+		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+				p_hwfn->mcp_info->func_info.mac);
+	else
+		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+	if (qed_mcp_is_init(p_hwfn)) {
+		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+			p_hwfn->hw_info.ovlan =
+				p_hwfn->mcp_info->func_info.ovlan;
+
+		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+	}
+
+	if (qed_mcp_is_init(p_hwfn)) {
+		enum qed_pci_personality protocol;
+
+		protocol = p_hwfn->mcp_info->func_info.protocol;
+		p_hwfn->hw_info.personality = protocol;
+	}
+
+	qed_hw_get_resc(p_hwfn);
+
+	return rc;
+}
+
+static void qed_get_dev_info(struct qed_dev *cdev)
+{
+	u32 tmp;
+
+	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+				     MISCS_REG_CHIP_NUM);
+	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+				     MISCS_REG_CHIP_REV);
+	MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+	/* Learn number of HW-functions */
+	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
+		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+		cdev->num_hwfns = 2;
+	} else {
+		cdev->num_hwfns = 1;
+	}
+
+	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+				    MISCS_REG_CHIP_TEST_REG) >> 4;
+	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
+				       MISCS_REG_CHIP_METAL);
+	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+	DP_INFO(cdev->hwfns,
+		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+		cdev->chip_num, cdev->chip_rev,
+		cdev->chip_bond_id, cdev->chip_metal);
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+				 void __iomem *p_regview,
+				 void __iomem *p_doorbells,
+				 enum qed_pci_personality personality)
+{
+	int rc = 0;
+
+	/* Split PCI bars evenly between hwfns */
+	p_hwfn->regview = p_regview;
+	p_hwfn->doorbells = p_doorbells;
+
+	/* Validate that chip access is feasible */
+	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+		DP_ERR(p_hwfn,
+		       "Reading the ME register returns all Fs; Preventing further chip access\n");
+		return -EINVAL;
+	}
+
+	get_function_id(p_hwfn);
+
+	rc = qed_hw_hwfn_prepare(p_hwfn);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
+		goto err0;
+	}
+
+	/* First hwfn learns basic information, e.g., number of hwfns */
+	if (!p_hwfn->my_id)
+		qed_get_dev_info(p_hwfn->cdev);
+
+	/* Initialize MCP structure */
+	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+		goto err1;
+	}
+
+	/* Read the device configuration information from the HW and SHMEM */
+	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+		goto err2;
+	}
+
+	/* Allocate the init RT array and initialize the init-ops engine */
+	rc = qed_init_alloc(p_hwfn);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
+		goto err2;
+	}
+
+	return rc;
+err2:
+	qed_mcp_free(p_hwfn);
+err1:
+	qed_hw_hwfn_free(p_hwfn);
+err0:
+	return rc;
+}
+
+static u32 qed_hw_bar_size(struct qed_dev *cdev,
+			   u8 bar_id)
+{
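+	/* bar_id 0 maps to the register view (BAR0); any other id is taken
+	 * to mean the doorbell bar (BAR2). Each bar is split evenly
+	 * between the device's hw-functions.
+	 */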
+	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+
+	return size / cdev->num_hwfns;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+		   int personality)
+{
+	int rc, i;
+
+	/* Store the precompiled init data ptrs */
+	qed_init_iro_array(cdev);
+
+	/* Initialize the first hwfn - will learn number of hwfns */
+	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+				   cdev->doorbells, personality);
+	if (rc)
+		return rc;
+
+	personality = cdev->hwfns[0].hw_info.personality;
+
+	/* Initialize the rest of the hwfns */
+	for (i = 1; i < cdev->num_hwfns; i++) {
+		void __iomem *p_regview, *p_doorbell;
+
+		p_regview =  cdev->regview +
+			     i * qed_hw_bar_size(cdev, 0);
+		p_doorbell = cdev->doorbells +
+			     i * qed_hw_bar_size(cdev, 1);
+		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+					   p_doorbell, personality);
+		if (rc) {
+			/* Cleanup previously initialized hwfns */
+			while (--i >= 0) {
+				qed_init_free(&cdev->hwfns[i]);
+				qed_mcp_free(&cdev->hwfns[i]);
+				qed_hw_hwfn_free(&cdev->hwfns[i]);
+			}
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		qed_init_free(p_hwfn);
+		qed_hw_hwfn_free(p_hwfn);
+		qed_mcp_free(p_hwfn);
+	}
+}
+
+int qed_chain_alloc(struct qed_dev *cdev,
+		    enum qed_chain_use_mode intended_use,
+		    enum qed_chain_mode mode,
+		    u16 num_elems,
+		    size_t elem_size,
+		    struct qed_chain *p_chain)
+{
+	dma_addr_t p_pbl_phys = 0;
+	void *p_pbl_virt = NULL;
+	dma_addr_t p_phys = 0;
+	void *p_virt = NULL;
+	u16 page_cnt = 0;
+	size_t size;
+
+	if (mode == QED_CHAIN_MODE_SINGLE)
+		page_cnt = 1;
+	else
+		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+	size = page_cnt * QED_CHAIN_PAGE_SIZE;
+	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+				    size, &p_phys, GFP_KERNEL);
+	if (!p_virt) {
+		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
+		goto nomem;
+	}
+
+	if (mode == QED_CHAIN_MODE_PBL) {
+		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+						size, &p_pbl_phys,
+						GFP_KERNEL);
+		if (!p_pbl_virt) {
+			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
+			goto nomem;
+		}
+
+		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
+				   (u8)elem_size, intended_use,
+				   p_pbl_phys, p_pbl_virt);
+	} else {
+		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
+			       (u8)elem_size, intended_use, mode);
+	}
+
+	return 0;
+
+nomem:
+	/* Free only what was successfully allocated */
+	if (p_virt)
+		dma_free_coherent(&cdev->pdev->dev,
+				  page_cnt * QED_CHAIN_PAGE_SIZE,
+				  p_virt, p_phys);
+	if (p_pbl_virt)
+		dma_free_coherent(&cdev->pdev->dev,
+				  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
+				  p_pbl_virt, p_pbl_phys);
+
+	return -ENOMEM;
+}
+
+void qed_chain_free(struct qed_dev *cdev,
+		    struct qed_chain *p_chain)
+{
+	size_t size;
+
+	if (!p_chain->p_virt_addr)
+		return;
+
+	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+		dma_free_coherent(&cdev->pdev->dev, size,
+				  p_chain->pbl.p_virt_table,
+				  p_chain->pbl.p_phys_table);
+	}
+
+	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
+	dma_free_coherent(&cdev->pdev->dev, size,
+			  p_chain->p_virt_addr,
+			  p_chain->p_phys_addr);
+}
+
+static void __qed_get_vport_stats(struct qed_dev *cdev,
+				  struct qed_eth_stats  *stats)
+{
+	int i, j;
+
+	memset(stats, 0, sizeof(*stats));
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct eth_mstorm_per_queue_stat mstats;
+		struct eth_ustorm_per_queue_stat ustats;
+		struct eth_pstorm_per_queue_stat pstats;
+		struct tstorm_per_port_stat tstats;
+		struct port_stats port_stats;
+		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		memset(&mstats, 0, sizeof(mstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &mstats,
+				p_hwfn->storm_stats.mstats.address,
+				p_hwfn->storm_stats.mstats.len);
+
+		memset(&ustats, 0, sizeof(ustats));
+		qed_memcpy_from(p_hwfn, p_ptt, &ustats,
+				p_hwfn->storm_stats.ustats.address,
+				p_hwfn->storm_stats.ustats.len);
+
+		memset(&pstats, 0, sizeof(pstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &pstats,
+				p_hwfn->storm_stats.pstats.address,
+				p_hwfn->storm_stats.pstats.len);
+
+		memset(&tstats, 0, sizeof(tstats));
+		qed_memcpy_from(p_hwfn, p_ptt, &tstats,
+				p_hwfn->storm_stats.tstats.address,
+				p_hwfn->storm_stats.tstats.len);
+
+		memset(&port_stats, 0, sizeof(port_stats));
+
+		if (p_hwfn->mcp_info)
+			qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+					p_hwfn->mcp_info->port_addr +
+					offsetof(struct public_port, stats),
+					sizeof(port_stats));
+		qed_ptt_release(p_hwfn, p_ptt);
+
+		stats->no_buff_discards +=
+			HILO_64_REGPAIR(mstats.no_buff_discard);
+		stats->packet_too_big_discard +=
+			HILO_64_REGPAIR(mstats.packet_too_big_discard);
+		stats->ttl0_discard +=
+			HILO_64_REGPAIR(mstats.ttl0_discard);
+		stats->tpa_coalesced_pkts +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+		stats->tpa_coalesced_events +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+		stats->tpa_aborts_num +=
+			HILO_64_REGPAIR(mstats.tpa_aborts_num);
+		stats->tpa_coalesced_bytes +=
+			HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+
+		stats->rx_ucast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+		stats->rx_mcast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+		stats->rx_bcast_bytes +=
+			HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+		stats->rx_ucast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+		stats->rx_mcast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+		stats->rx_bcast_pkts +=
+			HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+
+		stats->mftag_filter_discards +=
+			HILO_64_REGPAIR(tstats.mftag_filter_discard);
+		stats->mac_filter_discards +=
+			HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+
+		stats->tx_ucast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+		stats->tx_mcast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+		stats->tx_bcast_bytes +=
+			HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+		stats->tx_ucast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+		stats->tx_mcast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+		stats->tx_bcast_pkts +=
+			HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+		stats->tx_err_drop_pkts +=
+			HILO_64_REGPAIR(pstats.error_drop_pkts);
+		stats->rx_64_byte_packets += port_stats.pmm.r64;
+		stats->rx_127_byte_packets += port_stats.pmm.r127;
+		stats->rx_255_byte_packets += port_stats.pmm.r255;
+		stats->rx_511_byte_packets += port_stats.pmm.r511;
+		stats->rx_1023_byte_packets += port_stats.pmm.r1023;
+		stats->rx_1518_byte_packets += port_stats.pmm.r1518;
+		stats->rx_1522_byte_packets += port_stats.pmm.r1522;
+		stats->rx_2047_byte_packets += port_stats.pmm.r2047;
+		stats->rx_4095_byte_packets += port_stats.pmm.r4095;
+		stats->rx_9216_byte_packets += port_stats.pmm.r9216;
+		stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+		stats->rx_crc_errors += port_stats.pmm.rfcs;
+		stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+		stats->rx_pause_frames += port_stats.pmm.rxpf;
+		stats->rx_pfc_frames += port_stats.pmm.rxpp;
+		stats->rx_align_errors += port_stats.pmm.raln;
+		stats->rx_carrier_errors += port_stats.pmm.rfcr;
+		stats->rx_oversize_packets += port_stats.pmm.rovr;
+		stats->rx_jabbers += port_stats.pmm.rjbr;
+		stats->rx_undersize_packets += port_stats.pmm.rund;
+		stats->rx_fragments += port_stats.pmm.rfrg;
+		stats->tx_64_byte_packets += port_stats.pmm.t64;
+		stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+		stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+		stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+		stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+		stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+		stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+		stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+		stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+		stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+		stats->tx_pause_frames += port_stats.pmm.txpf;
+		stats->tx_pfc_frames += port_stats.pmm.txpp;
+		stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+		stats->tx_total_collisions += port_stats.pmm.tncl;
+		stats->rx_mac_bytes += port_stats.pmm.rbyte;
+		stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+		stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+		stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+		stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+		stats->tx_mac_bytes += port_stats.pmm.tbyte;
+		stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+		stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+		stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+		stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+
+		for (j = 0; j < 8; j++) {
+			stats->brb_truncates += port_stats.brb.brb_truncate[j];
+			stats->brb_discards += port_stats.brb.brb_discard[j];
+		}
+	}
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev,
+			 struct qed_eth_stats *stats)
+{
+	u32 i;
+
+	if (!cdev) {
+		memset(stats, 0, sizeof(*stats));
+		return;
+	}
+
+	__qed_get_vport_stats(cdev, stats);
+
+	if (!cdev->reset_stats)
+		return;
+
+	/* Reduce the statistics baseline */
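+	/* qed_eth_stats is effectively a flat array of u64 counters, so
+	 * the baseline can be subtracted element-wise.
+	 */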
+	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct eth_mstorm_per_queue_stat mstats;
+		struct eth_ustorm_per_queue_stat ustats;
+		struct eth_pstorm_per_queue_stat pstats;
+		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+
+		if (!p_ptt) {
+			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+			continue;
+		}
+
+		memset(&mstats, 0, sizeof(mstats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.mstats.address,
+			      &mstats,
+			      p_hwfn->storm_stats.mstats.len);
+
+		memset(&ustats, 0, sizeof(ustats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.ustats.address,
+			      &ustats,
+			      p_hwfn->storm_stats.ustats.len);
+
+		memset(&pstats, 0, sizeof(pstats));
+		qed_memcpy_to(p_hwfn, p_ptt,
+			      p_hwfn->storm_stats.pstats.address,
+			      &pstats,
+			      p_hwfn->storm_stats.pstats.len);
+
+		qed_ptt_release(p_hwfn, p_ptt);
+	}
+
+	/* PORT statistics are not necessarily reset, so we need to
+	 * read and create a baseline for future statistics.
+	 */
+	if (!cdev->reset_stats)
+		DP_INFO(cdev, "Reset stats not allocated\n");
+	else
+		__qed_get_vport_stats(cdev, cdev->reset_stats);
+}
+
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+		    u16 src_id, u16 *dst_id)
+{
+	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+		u16 min, max;
+
+		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE) - 1;
+		DP_NOTICE(p_hwfn,
+			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+			  src_id, min, max);
+
+		return -EINVAL;
+	}
+
+	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+	return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+		 u8 src_id, u8 *dst_id)
+{
+	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+		u8 min, max;
+
+		min = (u8)RESC_START(p_hwfn, QED_VPORT);
+		max = min + RESC_NUM(p_hwfn, QED_VPORT) - 1;
+		DP_NOTICE(p_hwfn,
+			  "vport id [%d] is not valid, available indices [%d - %d]\n",
+			  src_id, min, max);
+
+		return -EINVAL;
+	}
+
+	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+	return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+		   u8 src_id, u8 *dst_id)
+{
+	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+		u8 min, max;
+
+		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG) - 1;
+		DP_NOTICE(p_hwfn,
+			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+			  src_id, min, max);
+
+		return -EINVAL;
+	}
+
+	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 0000000..e29a3ba
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,283 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * @brief qed_init_dp - initialize the debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+void qed_init_dp(struct qed_dev *cdev,
+		 u32 dp_module,
+		 u8 dp_level);
+
+/**
+ * @brief qed_init_struct - initialize the device structure to
+ *        its defaults
+ *
+ * @param cdev
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_free -
+ *
+ * @param cdev
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_alloc -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * @brief qed_resc_setup -
+ *
+ * @param cdev
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_init -
+ *
+ * @param cdev
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *	  for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ *			Pass NULL if not using binary fw file.
+ *
+ * @return int
+ */
+int qed_hw_init(struct qed_dev *cdev,
+		bool b_hw_start,
+		enum qed_int_mode int_mode,
+		bool allow_npar_tx_switch,
+		const u8 *bin_fw_data);
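+
+/* A plausible bring-up order, inferred from the API in this header (a
+ * sketch, not a contract; the QED_INT_MODE_MSIX value is assumed from
+ * qed_int.h):
+ *
+ *	qed_hw_prepare(cdev, personality);
+ *	qed_resc_alloc(cdev);
+ *	qed_resc_setup(cdev);
+ *	qed_hw_init(cdev, true, QED_INT_MODE_MSIX, true, fw_data);
+ *
+ * Teardown reverses this via qed_hw_stop(), qed_hw_reset(),
+ * qed_resc_free() and qed_hw_remove().
+ */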
+
+/**
+ * @brief qed_hw_stop -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_stop_fastpath - should be called in case
+ *		slowpath is still required for the device,
+ *		but fastpath is not.
+ *
+ * @param cdev
+ *
+ */
+void qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_start_fastpath - restart fastpath traffic,
+ *		only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_hw_reset -
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_hw_reset(struct qed_dev *cdev);
+
+/**
+ * @brief qed_hw_prepare -
+ *
+ * @param cdev
+ * @param personality - personality to initialize
+ *
+ * @return int
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+		   int personality);
+
+/**
+ * @brief qed_hw_remove -
+ *
+ * @param cdev
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * @brief qed_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_ptt
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt);
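+
+/* Typical usage sketch for the acquire/release pair above:
+ *
+ *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ *
+ *	if (p_ptt) {
+ *		...access registers through p_ptt...
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}
+ */
+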
+void qed_get_vport_stats(struct qed_dev *cdev,
+			 struct qed_eth_stats   *stats);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+	QED_DMAE_ADDRESS_HOST_VIRT,
+	QED_DMAE_ADDRESS_HOST_PHYS,
+	QED_DMAE_ADDRESS_GRC
+};
+
+/* Values for the flags field below: if the QED_DMAE_FLAG_RW_REPL_SRC
+ * flag is set, the source is a block of length DMAE_MAX_RW_SIZE and
+ * the destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is used
+ * mostly to write a zeroed buffer to a destination address using DMA.
+ */
+#define QED_DMAE_FLAG_RW_REPL_SRC       0x00000001
+#define QED_DMAE_FLAG_COMPLETION_DST    0x00000008
+
+struct qed_dmae_params {
+	u32	flags; /* consists of QED_DMAE_FLAG_* values */
+};
+
+/**
+ * @brief qed_dmae_host2grc - copy data from a host source address to a
+ * GRC address using DMAE and the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt,
+		  u64 source_addr,
+		  u32 grc_addr,
+		  u32 size_in_dwords,
+		  u32 flags);
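+
+/* Illustrative call; buf_phys_addr and grc_dest_addr are hypothetical
+ * caller-owned values:
+ *
+ *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)buf_phys_addr,
+ *			       grc_dest_addr, size_in_dwords, 0);
+ */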
+
+/**
+ * @brief qed_chain_alloc - Allocate and initialize a chain
+ *
+ * @param cdev
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return int
+ */
+int
+qed_chain_alloc(struct qed_dev *cdev,
+		enum qed_chain_use_mode intended_use,
+		enum qed_chain_mode mode,
+		u16 num_elems,
+		size_t elem_size,
+		struct qed_chain *p_chain);
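+
+/* Usage sketch, assuming the QED_CHAIN_USE_TO_PRODUCE use mode and an
+ * element type from the HSI (values illustrative):
+ *
+ *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
+ *			     QED_CHAIN_MODE_SINGLE, 256,
+ *			     sizeof(union event_ring_element), &chain);
+ */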
+
+/**
+ * @brief qed_chain_free - Free chain DMA memory
+ *
+ * @param cdev
+ * @param p_chain
+ */
+void qed_chain_free(struct qed_dev *cdev,
+		    struct qed_chain *p_chain);
+
+/**
+ * @brief qed_fw_l2_queue - Get absolute L2 queue ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+		    u16 src_id,
+		    u16 *dst_id);
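+
+/* E.g. if this PF's L2-queue range starts at absolute index 16, a
+ * relative src_id of 2 yields *dst_id == 18.
+ */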
+
+/**
+ * @brief qed_fw_vport - Get absolute vport ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+		 u8 src_id,
+		 u8 *dst_id);
+
+/**
+ * @brief qed_fw_rss_eng - Get absolute RSS engine ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return int
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+		   u8 src_id,
+		   u8 *dst_id);
+
+/**
+ * @brief qed_final_cleanup - Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ *
+ * @return int
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 0000000..b2f8e85
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,5291 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+/********************************/
+/* Add include to common target */
+/********************************/
+
+/* opcodes for the event ring */
+enum common_event_opcode {
+	COMMON_EVENT_PF_START,
+	COMMON_EVENT_PF_STOP,
+	COMMON_EVENT_RESERVED,
+	COMMON_EVENT_RESERVED2,
+	COMMON_EVENT_RESERVED3,
+	COMMON_EVENT_RESERVED4,
+	COMMON_EVENT_RESERVED5,
+	MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+	COMMON_RAMROD_UNUSED,
+	COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
+	COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
+	COMMON_RAMROD_RESERVED,
+	COMMON_RAMROD_RESERVED2,
+	COMMON_RAMROD_RESERVED3,
+	MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+	__le32		spq_base_lo /* SPQ Ring Base Address low dword */;
+	__le32		spq_base_hi /* SPQ Ring Base Address high dword */;
+	struct regpair	consolid_base_addr;
+	__le16		spq_cons /* SPQ Ring Consumer */;
+	__le16		consolid_cons /* Consolidation Ring Consumer */;
+	__le32		reserved0[55] /* Pad to 15 cycles */;
+};
+
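+/* The aggregative-context flags below are packed bitfields; a field is
+ * read as (flags >> FIELD_SHIFT) & FIELD_MASK, matching the GET_FIELD()
+ * convention used elsewhere in the driver.
+ */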
+struct xstorm_core_conn_ag_ctx {
+	u8	reserved0 /* cdu_validation */;
+	u8	core_state /* state */;
+	u8	flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1   /* bit6 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1   /* bit7 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+	u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1   /* bit8 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1   /* bit9 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1   /* bit10 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1   /* bit11 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1   /* bit12 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1   /* bit13 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1   /* bit14 */
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1   /* bit15 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+	u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3   /* timer0cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3   /* timer1cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3   /* timer2cf */
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+	u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3   /* cf4 */
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3   /* cf5 */
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3   /* cf6 */
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3   /* cf7 */
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+	u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3   /* cf8 */
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3   /* cf9 */
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3   /* cf10 */
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3   /* cf11 */
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+	u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3   /* cf12 */
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3   /* cf13 */
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3   /* cf14 */
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3   /* cf15 */
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+	u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3   /* cf16 */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3   /* cf18 */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3   /* cf19 */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+	u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3   /* cf20 */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3   /* cf21 */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3   /* cf22 */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1   /* cf0en */
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1   /* cf1en */
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+	u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1   /* cf2en */
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1   /* cf3en */
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1   /* cf4en */
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1   /* cf5en */
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1   /* cf6en */
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1   /* cf7en */
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1   /* cf8en */
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1   /* cf9en */
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+	u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1   /* cf10en */
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1   /* cf11en */
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1   /* cf12en */
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1   /* cf13en */
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1   /* cf14en */
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1   /* cf15en */
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1   /* cf16en */
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+	u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1   /* cf18en */
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1   /* cf19en */
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1   /* cf20en */
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1   /* cf21en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1   /* cf22en */
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1   /* cf23en */
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1   /* rule0en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1   /* rule1en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+	u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1   /* rule2en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1   /* rule3en */
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1   /* rule4en */
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1   /* rule5en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1   /* rule6en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1   /* rule7en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1   /* rule8en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1   /* rule9en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+	u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1   /* rule10en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1   /* rule11en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1   /* rule12en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1   /* rule13en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1   /* rule14en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1   /* rule15en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1   /* rule16en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1   /* rule17en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+	u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1   /* rule18en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1   /* rule19en */
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1   /* rule20en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1   /* rule21en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1   /* rule22en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1   /* rule23en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1   /* rule24en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1   /* rule25en */
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+	u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1   /* bit16 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1   /* bit17 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1   /* bit18 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1   /* bit19 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1   /* bit20 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1   /* bit21 */
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3   /* cf23 */
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+	u8	byte2 /* byte2 */;
+	__le16	physical_q0 /* physical_q0 */;
+	__le16	consolid_prod /* physical_q1 */;
+	__le16	reserved16 /* physical_q2 */;
+	__le16	tx_bd_cons /* word3 */;
+	__le16	tx_bd_or_spq_prod /* word4 */;
+	__le16	word5 /* word5 */;
+	__le16	conn_dpi /* conn_dpi */;
+	u8	byte3 /* byte3 */;
+	u8	byte4 /* byte4 */;
+	u8	byte5 /* byte5 */;
+	u8	byte6 /* byte6 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le32	reg4 /* reg4 */;
+	__le32	reg5 /* cf_array0 */;
+	__le32	reg6 /* cf_array1 */;
+	__le16	word7 /* word7 */;
+	__le16	word8 /* word8 */;
+	__le16	word9 /* word9 */;
+	__le16	word10 /* word10 */;
+	__le32	reg7 /* reg7 */;
+	__le32	reg8 /* reg8 */;
+	__le32	reg9 /* reg9 */;
+	u8	byte7 /* byte7 */;
+	u8	byte8 /* byte8 */;
+	u8	byte9 /* byte9 */;
+	u8	byte10 /* byte10 */;
+	u8	byte11 /* byte11 */;
+	u8	byte12 /* byte12 */;
+	u8	byte13 /* byte13 */;
+	u8	byte14 /* byte14 */;
+	u8	byte15 /* byte15 */;
+	u8	byte16 /* byte16 */;
+	__le16	word11 /* word11 */;
+	__le32	reg10 /* reg10 */;
+	__le32	reg11 /* reg11 */;
+	__le32	reg12 /* reg12 */;
+	__le32	reg13 /* reg13 */;
+	__le32	reg14 /* reg14 */;
+	__le32	reg15 /* reg15 */;
+	__le32	reg16 /* reg16 */;
+	__le32	reg17 /* reg17 */;
+	__le32	reg18 /* reg18 */;
+	__le32	reg19 /* reg19 */;
+	__le16	word12 /* word12 */;
+	__le16	word13 /* word13 */;
+	__le16	word14 /* word14 */;
+	__le16	word15 /* word15 */;
+};
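+
+/* Illustrative only: the MASK/SHIFT pairs above describe bit-fields packed
+ * into the flags bytes.  Assuming the driver's GET_FIELD()/SET_FIELD()
+ * helpers, a field is read as (flags >> NAME_SHIFT) & NAME_MASK and written
+ * by clearing NAME_MASK << NAME_SHIFT before OR-ing in the new value, e.g.:
+ *	SET_FIELD(ctx->flags0, XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0, 1);
+ */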
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+	__le32 reserved[24];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+	struct ystorm_core_conn_st_ctx	ystorm_st_context;
+	struct regpair			ystorm_st_padding[2] /* padding */;
+	struct pstorm_core_conn_st_ctx	pstorm_st_context;
+	struct regpair			pstorm_st_padding[2];
+	struct xstorm_core_conn_st_ctx	xstorm_st_context;
+	struct xstorm_core_conn_ag_ctx	xstorm_ag_context;
+	struct mstorm_core_conn_st_ctx	mstorm_st_context;
+	struct regpair			mstorm_st_padding[2];
+	struct ustorm_core_conn_st_ctx	ustorm_st_context;
+	struct regpair			ustorm_st_padding[2] /* padding */;
+};
+
+struct eth_mstorm_per_queue_stat {
+	struct regpair  ttl0_discard;
+	struct regpair  packet_too_big_discard;
+	struct regpair  no_buff_discard;
+	struct regpair  not_active_discard;
+	struct regpair  tpa_coalesced_pkts;
+	struct regpair  tpa_coalesced_events;
+	struct regpair  tpa_aborts_num;
+	struct regpair  tpa_coalesced_bytes;
+};
+
+struct eth_pstorm_per_queue_stat {
+	struct regpair  sent_ucast_bytes;
+	struct regpair  sent_mcast_bytes;
+	struct regpair  sent_bcast_bytes;
+	struct regpair  sent_ucast_pkts;
+	struct regpair  sent_mcast_pkts;
+	struct regpair  sent_bcast_pkts;
+	struct regpair  error_drop_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+	struct regpair  rcv_ucast_bytes;
+	struct regpair  rcv_mcast_bytes;
+	struct regpair  rcv_bcast_bytes;
+	struct regpair  rcv_ucast_pkts;
+	struct regpair  rcv_mcast_pkts;
+	struct regpair  rcv_bcast_pkts;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+	struct regpair	addr /* Next Page Address */;
+	__le32		reserved[2] /* Reserved */;
+};
+
+union event_ring_element {
+	struct event_ring_entry		entry /* Event Ring Entry */;
+	struct event_ring_next_addr	next_addr;
+};
+
+enum personality_type {
+	PERSONALITY_RESERVED,
+	PERSONALITY_RESERVED2,
+	PERSONALITY_RDMA_AND_ETH /* RoCE or iWARP */,
+	PERSONALITY_RESERVED3,
+	PERSONALITY_ETH /* Ethernet */,
+	PERSONALITY_RESERVED4,
+	MAX_PERSONALITY_TYPE
+};
+
+struct pf_start_tunnel_config {
+	u8	set_vxlan_udp_port_flg;
+	u8	set_geneve_udp_port_flg;
+	u8	tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8	tx_enable_l2geneve;
+	u8	tx_enable_ipgeneve;
+	u8	tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+	u8	tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+	u8	tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+	u8	tunnel_clss_l2geneve;
+	u8	tunnel_clss_ipgeneve;
+	u8	tunnel_clss_l2gre;
+	u8	tunnel_clss_ipgre;
+	__le16	vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+	__le16	geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+	struct regpair			event_ring_pbl_addr;
+	struct regpair			consolid_q_pbl_addr;
+	struct pf_start_tunnel_config	tunnel_config;
+	__le16				event_ring_sb_id;
+	u8				base_vf_id;
+	u8				num_vfs;
+	u8				event_ring_num_pages;
+	u8				event_ring_sb_index;
+	u8				path_id;
+	u8				warning_as_error;
+	u8				dont_log_ramrods;
+	u8				personality;
+	__le16				log_type_mask;
+	u8				mf_mode /* Multi function mode */;
+	u8				integ_phase /* Integration phase */;
+	u8				allow_npar_tx_switching;
+	u8				inner_to_outer_pri_map[8];
+	u8				pri_map_valid;
+	u32				outer_tag;
+	u8				reserved0[4];
+};
+
+enum ports_mode {
+	ENGX2_PORTX1 /* 2 engines x 1 port */,
+	ENGX2_PORTX2 /* 2 engines x 2 ports */,
+	ENGX1_PORTX1 /* 1 engine  x 1 port */,
+	ENGX1_PORTX2 /* 1 engine  x 2 ports */,
+	ENGX1_PORTX4 /* 1 engine  x 4 ports */,
+	MAX_PORTS_MODE
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+	__le32	cid /* Slowpath Connection CID */;
+	u8	cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+	u8	protocol_id /* Ramrod Protocol ID */;
+	__le16	echo /* Ramrod echo */;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+	struct ramrod_header	hdr /* Ramrod Header */;
+	struct regpair		data_ptr;
+};
+
+struct tstorm_per_port_stat {
+	struct regpair	trunc_error_discard;
+	struct regpair	mac_error_discard;
+	struct regpair	mftag_filter_discard;
+	struct regpair	eth_mac_filter_discard;
+	struct regpair	ll2_mac_filter_discard;
+	struct regpair	ll2_conn_disabled_discard;
+	struct regpair	iscsi_irregular_pkt;
+	struct regpair	fcoe_irregular_pkt;
+	struct regpair	roce_irregular_pkt;
+	struct regpair	eth_irregular_pkt;
+	struct regpair	toe_irregular_pkt;
+	struct regpair	preroce_irregular_pkt;
+};
+
+struct atten_status_block {
+	__le32	atten_bits;
+	__le32	atten_ack;
+	__le16	reserved0;
+	__le16	sb_index /* status block running index */;
+	__le32	reserved1;
+};
+
+enum block_addr {
+	GRCBASE_GRC		= 0x50000,
+	GRCBASE_MISCS		= 0x9000,
+	GRCBASE_MISC		= 0x8000,
+	GRCBASE_DBU		= 0xa000,
+	GRCBASE_PGLUE_B		= 0x2a8000,
+	GRCBASE_CNIG		= 0x218000,
+	GRCBASE_CPMU		= 0x30000,
+	GRCBASE_NCSI		= 0x40000,
+	GRCBASE_OPTE		= 0x53000,
+	GRCBASE_BMB		= 0x540000,
+	GRCBASE_PCIE		= 0x54000,
+	GRCBASE_MCP		= 0xe00000,
+	GRCBASE_MCP2		= 0x52000,
+	GRCBASE_PSWHST		= 0x2a0000,
+	GRCBASE_PSWHST2		= 0x29e000,
+	GRCBASE_PSWRD		= 0x29c000,
+	GRCBASE_PSWRD2		= 0x29d000,
+	GRCBASE_PSWWR		= 0x29a000,
+	GRCBASE_PSWWR2		= 0x29b000,
+	GRCBASE_PSWRQ		= 0x280000,
+	GRCBASE_PSWRQ2		= 0x240000,
+	GRCBASE_PGLCS		= 0x0,
+	GRCBASE_PTU		= 0x560000,
+	GRCBASE_DMAE		= 0xc000,
+	GRCBASE_TCM		= 0x1180000,
+	GRCBASE_MCM		= 0x1200000,
+	GRCBASE_UCM		= 0x1280000,
+	GRCBASE_XCM		= 0x1000000,
+	GRCBASE_YCM		= 0x1080000,
+	GRCBASE_PCM		= 0x1100000,
+	GRCBASE_QM		= 0x2f0000,
+	GRCBASE_TM		= 0x2c0000,
+	GRCBASE_DORQ		= 0x100000,
+	GRCBASE_BRB		= 0x340000,
+	GRCBASE_SRC		= 0x238000,
+	GRCBASE_PRS		= 0x1f0000,
+	GRCBASE_TSDM		= 0xfb0000,
+	GRCBASE_MSDM		= 0xfc0000,
+	GRCBASE_USDM		= 0xfd0000,
+	GRCBASE_XSDM		= 0xf80000,
+	GRCBASE_YSDM		= 0xf90000,
+	GRCBASE_PSDM		= 0xfa0000,
+	GRCBASE_TSEM		= 0x1700000,
+	GRCBASE_MSEM		= 0x1800000,
+	GRCBASE_USEM		= 0x1900000,
+	GRCBASE_XSEM		= 0x1400000,
+	GRCBASE_YSEM		= 0x1500000,
+	GRCBASE_PSEM		= 0x1600000,
+	GRCBASE_RSS		= 0x238800,
+	GRCBASE_TMLD		= 0x4d0000,
+	GRCBASE_MULD		= 0x4e0000,
+	GRCBASE_YULD		= 0x4c8000,
+	GRCBASE_XYLD		= 0x4c0000,
+	GRCBASE_PRM		= 0x230000,
+	GRCBASE_PBF_PB1		= 0xda0000,
+	GRCBASE_PBF_PB2		= 0xda4000,
+	GRCBASE_RPB		= 0x23c000,
+	GRCBASE_BTB		= 0xdb0000,
+	GRCBASE_PBF		= 0xd80000,
+	GRCBASE_RDIF		= 0x300000,
+	GRCBASE_TDIF		= 0x310000,
+	GRCBASE_CDU		= 0x580000,
+	GRCBASE_CCFC		= 0x2e0000,
+	GRCBASE_TCFC		= 0x2d0000,
+	GRCBASE_IGU		= 0x180000,
+	GRCBASE_CAU		= 0x1c0000,
+	GRCBASE_UMAC		= 0x51000,
+	GRCBASE_XMAC		= 0x210000,
+	GRCBASE_DBG		= 0x10000,
+	GRCBASE_NIG		= 0x500000,
+	GRCBASE_WOL		= 0x600000,
+	GRCBASE_BMBN		= 0x610000,
+	GRCBASE_IPC		= 0x20000,
+	GRCBASE_NWM		= 0x800000,
+	GRCBASE_NWS		= 0x700000,
+	GRCBASE_MS		= 0x6a0000,
+	GRCBASE_PHY_PCIE	= 0x618000,
+	GRCBASE_MISC_AEU	= 0x8000,
+	GRCBASE_BAR0_MAP	= 0x1c00000,
+	MAX_BLOCK_ADDR
+};
+
+enum block_id {
+	BLOCK_GRC,
+	BLOCK_MISCS,
+	BLOCK_MISC,
+	BLOCK_DBU,
+	BLOCK_PGLUE_B,
+	BLOCK_CNIG,
+	BLOCK_CPMU,
+	BLOCK_NCSI,
+	BLOCK_OPTE,
+	BLOCK_BMB,
+	BLOCK_PCIE,
+	BLOCK_MCP,
+	BLOCK_MCP2,
+	BLOCK_PSWHST,
+	BLOCK_PSWHST2,
+	BLOCK_PSWRD,
+	BLOCK_PSWRD2,
+	BLOCK_PSWWR,
+	BLOCK_PSWWR2,
+	BLOCK_PSWRQ,
+	BLOCK_PSWRQ2,
+	BLOCK_PGLCS,
+	BLOCK_PTU,
+	BLOCK_DMAE,
+	BLOCK_TCM,
+	BLOCK_MCM,
+	BLOCK_UCM,
+	BLOCK_XCM,
+	BLOCK_YCM,
+	BLOCK_PCM,
+	BLOCK_QM,
+	BLOCK_TM,
+	BLOCK_DORQ,
+	BLOCK_BRB,
+	BLOCK_SRC,
+	BLOCK_PRS,
+	BLOCK_TSDM,
+	BLOCK_MSDM,
+	BLOCK_USDM,
+	BLOCK_XSDM,
+	BLOCK_YSDM,
+	BLOCK_PSDM,
+	BLOCK_TSEM,
+	BLOCK_MSEM,
+	BLOCK_USEM,
+	BLOCK_XSEM,
+	BLOCK_YSEM,
+	BLOCK_PSEM,
+	BLOCK_RSS,
+	BLOCK_TMLD,
+	BLOCK_MULD,
+	BLOCK_YULD,
+	BLOCK_XYLD,
+	BLOCK_PRM,
+	BLOCK_PBF_PB1,
+	BLOCK_PBF_PB2,
+	BLOCK_RPB,
+	BLOCK_BTB,
+	BLOCK_PBF,
+	BLOCK_RDIF,
+	BLOCK_TDIF,
+	BLOCK_CDU,
+	BLOCK_CCFC,
+	BLOCK_TCFC,
+	BLOCK_IGU,
+	BLOCK_CAU,
+	BLOCK_UMAC,
+	BLOCK_XMAC,
+	BLOCK_DBG,
+	BLOCK_NIG,
+	BLOCK_WOL,
+	BLOCK_BMBN,
+	BLOCK_IPC,
+	BLOCK_NWM,
+	BLOCK_NWS,
+	BLOCK_MS,
+	BLOCK_PHY_PCIE,
+	BLOCK_MISC_AEU,
+	BLOCK_BAR0_MAP,
+	MAX_BLOCK_ID
+};
+
+enum command_type_bit {
+	IGU_COMMAND_TYPE_NOP	= 0,
+	IGU_COMMAND_TYPE_SET	= 1,
+	MAX_COMMAND_TYPE_BIT
+};
+
+struct dmae_cmd {
+	__le32 opcode;
+#define DMAE_CMD_SRC_MASK              0x1
+#define DMAE_CMD_SRC_SHIFT             0
+#define DMAE_CMD_DST_MASK              0x3
+#define DMAE_CMD_DST_SHIFT             1
+#define DMAE_CMD_C_DST_MASK            0x1
+#define DMAE_CMD_C_DST_SHIFT           3
+#define DMAE_CMD_CRC_RESET_MASK        0x1
+#define DMAE_CMD_CRC_RESET_SHIFT       4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
+#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
+#define DMAE_CMD_COMP_FUNC_MASK        0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT       7
+#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
+#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK        0x1
+#define DMAE_CMD_RESERVED1_SHIFT       13
+#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
+#define DMAE_CMD_ERR_HANDLING_MASK     0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT    16
+#define DMAE_CMD_PORT_ID_MASK          0x3
+#define DMAE_CMD_PORT_ID_SHIFT         18
+#define DMAE_CMD_SRC_PF_ID_MASK        0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT       20
+#define DMAE_CMD_DST_PF_ID_MASK        0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT       24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK        0x3
+#define DMAE_CMD_RESERVED2_SHIFT       30
+	__le32	src_addr_lo;
+	__le32	src_addr_hi;
+	__le32	dst_addr_lo;
+	__le32	dst_addr_hi;
+	__le16	length /* Length in DW */;
+	__le16	opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK        0xFF     /* Source VF id */
+#define DMAE_CMD_SRC_VF_ID_SHIFT       0
+#define DMAE_CMD_DST_VF_ID_MASK        0xFF     /* Destination VF id */
+#define DMAE_CMD_DST_VF_ID_SHIFT       8
+	__le32	comp_addr_lo /* PCIe completion address low or GRC address */;
+	__le32	comp_addr_hi;
+	__le32	comp_val /* Value to write to completion address */;
+	__le32	crc32 /* crc32 result */;
+	__le32	crc_32_c /* crc32_c result */;
+	__le16	crc16 /* crc16 result */;
+	__le16	crc16_c /* crc16_c result */;
+	__le16	crc10 /* crc_t10 result */;
+	__le16	reserved;
+	__le16	xsum16 /* checksum16 result */;
+	__le16	xsum8 /* checksum8 result */;
+};
+
+struct igu_cleanup {
+	__le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT    0
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1 /* cleanup clear - 0, set - 1 */
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+	__le32 reserved1;
+};
+
+union igu_command {
+	struct igu_prod_cons_update	prod_cons_update;
+	struct igu_cleanup		cleanup;
+};
+
+struct igu_command_reg_ctrl {
+	__le16	opaque_fid;
+	__le16	igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+struct igu_mapping_line {
+	__le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK            0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT           0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1      /* PF-1, VF-0 */
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
+#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
+};
+
+struct igu_msix_vector {
+	struct regpair	address;
+	__le32		data;
+	__le32		msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
+};
+
+enum init_modes {
+	MODE_BB_A0,
+	MODE_RESERVED,
+	MODE_RESERVED2,
+	MODE_ASIC,
+	MODE_RESERVED3,
+	MODE_RESERVED4,
+	MODE_RESERVED5,
+	MODE_SF,
+	MODE_MF_SD,
+	MODE_MF_SI,
+	MODE_PORTS_PER_ENG_1,
+	MODE_PORTS_PER_ENG_2,
+	MODE_PORTS_PER_ENG_4,
+	MODE_40G,
+	MODE_100G,
+	MODE_EAGLE_ENG1_WORKAROUND,
+	MAX_INIT_MODES
+};
+
+enum init_phases {
+	PHASE_ENGINE,
+	PHASE_PORT,
+	PHASE_PF,
+	PHASE_RESERVED,
+	PHASE_QM_PF,
+	MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* cf0 */
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* cf1 */
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* cf2 */
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	__le16	word0 /* word0 */;
+	__le16	word1 /* word1 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+	u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
+};
+
+enum pxp_tph_st_hint {
+	TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
+	TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
+	TPH_ST_HINT_TARGET,
+	TPH_ST_HINT_TARGET_PRIO,
+	MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+	u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
+#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+	__le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+	u32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1         /* PQ active */
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF        /* RL ID */
+#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_VOQ_MASK               0x1F        /* VOQ */
+#define QM_RF_PQ_MAP_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3         /* WRR weight */
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1         /* RL active */
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+	__le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF      /* completion parameters 0-15 */
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK   0xF         /* completion type 16-19 */
+#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
+#define SDM_OP_GEN_RESERVED_MASK    0xFFF       /* reserved 20-31 */
+#define SDM_OP_GEN_RESERVED_SHIFT   20
+};
+
+struct tstorm_core_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1       /* bit2 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1       /* bit3 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1       /* bit4 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1       /* bit5 */
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* timer0cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+	u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* timer1cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* timer2cf */
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3       /* timer_stop_all */
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3       /* cf4 */
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+	u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3       /* cf5 */
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3       /* cf6 */
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3       /* cf7 */
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3       /* cf8 */
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+	u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3       /* cf9 */
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3       /* cf10 */
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1       /* cf3en */
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+	u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1       /* cf4en */
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1       /* cf5en */
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1       /* cf6en */
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1       /* cf7en */
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1       /* cf8en */
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1       /* cf9en */
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1       /* cf10en */
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1       /* rule5en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1       /* rule6en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1       /* rule7en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1       /* rule8en */
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le32	reg4 /* reg4 */;
+	__le32	reg5 /* reg5 */;
+	__le32	reg6 /* reg6 */;
+	__le32	reg7 /* reg7 */;
+	__le32	reg8 /* reg8 */;
+	u8	byte2 /* byte2 */;
+	u8	byte3 /* byte3 */;
+	__le16	word0 /* word0 */;
+	u8	byte4 /* byte4 */;
+	u8	byte5 /* byte5 */;
+	__le16	word1 /* word1 */;
+	__le16	word2 /* conn_dpi */;
+	__le16	word3 /* word3 */;
+	__le32	reg9 /* reg9 */;
+	__le32	reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+	u8	reserved /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* timer0cf */
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* timer1cf */
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* timer2cf */
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3       /* timer_stop_all */
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3       /* cf4 */
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3       /* cf5 */
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3       /* cf6 */
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
+	u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1       /* cf3en */
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1       /* cf4en */
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1       /* cf5en */
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1       /* cf6en */
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+	u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1       /* rule5en */
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1       /* rule6en */
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1       /* rule7en */
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1       /* rule8en */
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+	u8	byte2 /* byte2 */;
+	u8	byte3 /* byte3 */;
+	__le16	word0 /* conn_dpi */;
+	__le16	word1 /* word1 */;
+	__le32	rx_producers /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le16	word2 /* word2 */;
+	__le16	word3 /* word3 */;
+};
+
+struct ystorm_core_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1       /* exist_in_qm0 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1       /* exist_in_qm1 */
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3       /* cf0 */
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3       /* cf1 */
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3       /* cf2 */
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+	u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1       /* cf0en */
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1       /* cf1en */
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1       /* cf2en */
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1       /* rule0en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1       /* rule1en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1       /* rule2en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1       /* rule3en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1       /* rule4en */
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8	byte2 /* byte2 */;
+	u8	byte3 /* byte3 */;
+	__le16	word0 /* word0 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le16	word1 /* word1 */;
+	__le16	word2 /* word2 */;
+	__le16	word3 /* word3 */;
+	__le16	word4 /* word4 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+};
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS                   23
+#define MAX_GRC_ADDR                    ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID                    0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS  4
+#define MAX_INIT_PATTERN_SIZE	BIT(INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE                 8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN                  19
+#define PXP_WIN_DWORD_SIZE_BITS 10
+#define PXP_WIN_DWORD_SIZE		BIT(PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS  (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE               (PXP_WIN_DWORD_SIZE * 4)
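+
+/* Worked out: each window spans 1 << 10 = 1024 dwords, i.e. 1024 * 4 = 4096
+ * bytes (consistent with PXP_WIN_BYTE_SIZE_BITS = 12), and NUM_OF_PXP_WIN
+ * such windows exist.
+ */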
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS                       8
+#define DUMP_SEQ_LEN_MAX_VAL            ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS                       18
+#define DUMP_MEM_LEN_MAX_VAL            ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS                        6
+#define REG_TYPE_ID_MAX_VAL                     ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS                           8
+#define BLOCK_ID_MAX_VAL                        ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM           3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS          3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS                      0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT          0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX     0xffffff
+
+/* max number of register values following the idle check header */
+#define IDLE_CHK_MAX_DUMP_REGS          2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR           0
+#define IDLE_CHK_QM_RD_WR_BANK          1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES                  8
+
+/* the MCP Trace metadata signature is duplicated in the perl script that
+ * generates the NVRAM images.
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE  0x669955aa
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+	u32	offset;
+	u32	length /* buffer length in bytes */;
+};
+
+/* binary buffer types */
+enum bin_buffer_type {
+	BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
+	BIN_BUF_INIT_CMD /* init commands */,
+	BIN_BUF_INIT_VAL /* init data */,
+	BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+	BIN_BUF_IRO /* internal RAM offsets array */,
+	MAX_BIN_BUFFER_TYPE
+};
+
+/* Chip IDs */
+enum chip_ids {
+	CHIP_BB_A0 /* BB A0 chip ID */,
+	CHIP_BB_B0 /* BB B0 chip ID */,
+	CHIP_K2 /* AH chip ID */,
+	MAX_CHIP_IDS
+};
+
+enum idle_chk_severity_types {
+	IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
+	IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+	IDLE_CHK_SEVERITY_WARNING,
+	MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+struct init_array_raw_hdr {
+	__le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT   0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF       /* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+struct init_array_standard_hdr {
+	__le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+struct init_array_zipped_hdr {
+	__le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK         0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT        0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+struct init_array_pattern_hdr {
+	__le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK          0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT         0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT  8
+};
+
+union init_array_hdr {
+	struct init_array_raw_hdr	raw /* raw init array header */;
+	struct init_array_standard_hdr	standard;
+	struct init_array_zipped_hdr	zipped /* zipped init array header */;
+	struct init_array_pattern_hdr	pattern /* pattern init array header */;
+};
+
+enum init_array_types {
+	INIT_ARR_STANDARD /* standard init array */,
+	INIT_ARR_ZIPPED /* zipped init array */,
+	INIT_ARR_PATTERN /* a repeated pattern */,
+	MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+	__le32	op_data;
+#define INIT_CALLBACK_OP_OP_MASK        0xF
+#define INIT_CALLBACK_OP_OP_SHIFT       0
+#define INIT_CALLBACK_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+	__le16	callback_id /* Callback ID */;
+	__le16	block_id /* Blocks ID */;
+};
+
+/* init comparison types */
+enum init_comparison_types {
+	INIT_COMPARISON_EQ /* comparison: equal */,
+	INIT_COMPARISON_OR /* comparison: bitwise or */,
+	INIT_COMPARISON_AND /* comparison: bitwise and */,
+	MAX_INIT_COMPARISON_TYPES
+};
+
+/* init operation: delay */
+struct init_delay_op {
+	__le32	op_data;
+#define INIT_DELAY_OP_OP_MASK        0xF
+#define INIT_DELAY_OP_OP_SHIFT       0
+#define INIT_DELAY_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+	__le32	delay /* delay in us */;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+	__le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK          0xF
+#define INIT_IF_MODE_OP_OP_SHIFT         0
+#define INIT_IF_MODE_OP_RESERVED1_MASK   0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT  4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+	__le16	reserved2;
+	__le16	modes_buf_offset;
+};
+
+/*  init operation: if_phase */
+struct init_if_phase_op {
+	__le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK           0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT          0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK  0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK    0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT   5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK   0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT  16
+	__le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK        0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT       0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK    0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT   8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK     0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT    16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+	INIT_MODE_OP_NOT /* init mode not operator */,
+	INIT_MODE_OP_OR /* init mode or operator */,
+	INIT_MODE_OP_AND /* init mode and operator */,
+	MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+	__le32	op_data;
+#define INIT_RAW_OP_OP_MASK      0xF
+#define INIT_RAW_OP_OP_SHIFT     0
+#define INIT_RAW_OP_PARAM1_MASK  0xFFFFFFF      /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+	__le32	param2 /* Init param 2 */;
+};
+
+/* init array params */
+struct init_op_array_params {
+	__le16	size /* array size in dwords */;
+	__le16	offset /* array start offset in dwords */;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+	__le32				inline_val;
+	__le32				zeros_count;
+	__le32				array_offset;
+	struct init_op_array_params	runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+	__le32 data;
+#define INIT_WRITE_OP_OP_MASK        0xF
+#define INIT_WRITE_OP_OP_SHIFT       0
+#define INIT_WRITE_OP_SOURCE_MASK    0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT   4
+#define INIT_WRITE_OP_RESERVED_MASK  0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT  9
+	union init_write_args args /* Write init operation arguments */;
+};
+
+/* init operation: read */
+struct init_read_op {
+	__le32 op_data;
+#define INIT_READ_OP_OP_MASK         0xF
+#define INIT_READ_OP_OP_SHIFT        0
+#define INIT_READ_OP_POLL_COMP_MASK  0x7
+#define INIT_READ_OP_POLL_COMP_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK   0x1
+#define INIT_READ_OP_RESERVED_SHIFT  7
+#define INIT_READ_OP_POLL_MASK       0x1
+#define INIT_READ_OP_POLL_SHIFT      8
+#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT   9
+	__le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+	struct init_raw_op	raw /* raw init operation */;
+	struct init_write_op	write /* write init operation */;
+	struct init_read_op	read /* read init operation */;
+	struct init_if_mode_op	if_mode /* if_mode init operation */;
+	struct init_if_phase_op if_phase /* if_phase init operation */;
+	struct init_callback_op callback /* callback init operation */;
+	struct init_delay_op	delay /* delay init operation */;
+};
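+
+/* Parsing sketch (illustrative): every init operation starts with a dword
+ * whose low 4 bits hold the operation type (INIT_RAW_OP_OP), so a command
+ * processor can dispatch through the raw view, e.g. (assuming a GET_FIELD
+ * helper that expands to (val >> NAME_SHIFT) & NAME_MASK):
+ *	switch (GET_FIELD(le32_to_cpu(op->raw.op_data), INIT_RAW_OP_OP)) {
+ *	case INIT_OP_WRITE:
+ *		... handle op->write ...
+ *	}
+ */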
+
+/* Init command operation types */
+enum init_op_types {
+	INIT_OP_READ /* GRC read init command */,
+	INIT_OP_WRITE /* GRC write init command */,
+	INIT_OP_IF_MODE,
+	INIT_OP_IF_PHASE,
+	INIT_OP_DELAY /* delay init command */,
+	INIT_OP_CALLBACK /* callback init command */,
+	MAX_INIT_OP_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+	INIT_SRC_INLINE /* init value is included in the init command */,
+	INIT_SRC_ZEROS /* init value is all zeros */,
+	INIT_SRC_ARRAY /* init value is an array of values */,
+	INIT_SRC_RUNTIME /* init value is provided during runtime */,
+	MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+	u32	base /* RAM field offset */;
+	u16	m1 /* multiplier 1 */;
+	u16	m2 /* multiplier 2 */;
+	u16	m3 /* multiplier 3 */;
+	u16	size /* RAM field size */;
+};
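+
+/* Illustrative: a RAM field's address is derived as
+ *	base + idx1 * m1 + idx2 * m2 + idx3 * m3
+ * for up to three indices; the *_OFFSET macros further below apply m1 only,
+ * e.g. TSTORM_PORT_STAT_OFFSET(port_id) = IRO[1].base + (port_id) * IRO[1].m1.
+ */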
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+	u8	active /* Indicates if this port is active */;
+	u8	num_active_phys_tcs;
+	u16	num_pbf_cmd_lines;
+	u16	num_btb_blocks;
+	__le16	reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+	u8	vport_id /* VPORT ID */;
+	u8	tc_id /* TC ID */;
+	u8	wrr_group /* WRR group */;
+	u8	reserved;
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+	u32	vport_rl;
+	u16	vport_wfq;
+	u16	first_tx_pq_id[NUM_OF_TCS];
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD \
+	0x00f000UL
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM \
+	0x010000UL
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM \
+	0x011000UL
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
+	0x012000UL
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM \
+	0x013000UL
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
+	0x014000UL
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
+	0x015000UL
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM \
+	0x016000UL
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM \
+	0x017000UL
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM \
+	0x018000UL
+
+/**
+ * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id			- physical function ID
+ * @param num_pf_cids	- number of connections used by this PF
+ * @param num_vf_cids	- number of connections used by VFs of this PF
+ * @param num_tids		- number of tasks used by this PF
+ * @param num_pf_pqs	- number of PQs used by this PF
+ * @param num_vf_pqs	- number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 qed_qm_pf_mem_size(u8	pf_id,
+		       u32	num_pf_cids,
+		       u32	num_vf_cids,
+		       u32	num_tids,
+		       u16	num_pf_pqs,
+		       u16	num_vf_pqs);
+
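+/* Illustrative sizing call (all counts hypothetical):
+ *	u32 mem_4kb = qed_qm_pf_mem_size(0, 4096, 2048, 1024, 16, 8);
+ * i.e. mem_4kb * 4096 bytes of host memory are needed before any of the
+ * QM init HSI functions below run.
+ */
+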
+struct qed_qm_common_rt_init_params {
+	u8				max_ports_per_engine;
+	u8				max_phys_tcs_per_port;
+	bool				pf_rl_en;
+	bool				pf_wfq_en;
+	bool				vport_rl_en;
+	bool				vport_wfq_en;
+	struct init_qm_port_params	*port_params;
+};
+
+/**
+ * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine	- max number of ports per engine in HW
+ * @param max_phys_tcs_per_port	- max number of physical TCs per port in HW
+ * @param pf_rl_en				- enable per-PF rate limiters
+ * @param pf_wfq_en				- enable per-PF WFQ
+ * @param vport_rl_en			- enable per-VPORT rate limiters
+ * @param vport_wfq_en			- enable per-VPORT WFQ
+ * @param port_params			- array of size MAX_NUM_PORTS with
+ *					  parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(
+	struct qed_hwfn				*p_hwfn,
+	struct qed_qm_common_rt_init_params	*p_params);
+
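+/* Illustrative engine-phase setup (all values hypothetical, port_params
+ * is an assumed caller-owned array):
+ *	struct qed_qm_common_rt_init_params params = {
+ *		.max_ports_per_engine	= 2,
+ *		.max_phys_tcs_per_port	= 4,
+ *		.pf_rl_en		= true,
+ *		.port_params		= port_params,
+ *	};
+ *	rc = qed_qm_common_rt_init(p_hwfn, &params);
+ */
+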
+struct qed_qm_pf_rt_init_params {
+	u8				port_id;
+	u8				pf_id;
+	u8				max_phys_tcs_per_port;
+	bool				is_first_pf;
+	u32				num_pf_cids;
+	u32				num_vf_cids;
+	u32				num_tids;
+	u16				start_pq;
+	u16				num_pf_pqs;
+	u16				num_vf_pqs;
+	u8				start_vport;
+	u8				num_vports;
+	u8				pf_wfq;
+	u32				pf_rl;
+	struct init_qm_pq_params	*pq_params;
+	struct init_qm_vport_params	*vport_params;
+};
+
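+/**
+ * @brief qed_qm_pf_rt_init - Prepare QM runtime init values for the PF phase
+ *
+ * @param p_hwfn
+ * @param p_ptt		- ptt window used for writing the registers
+ * @param p_params	- PF phase init parameters
+ *
+ * @return 0 on success, -1 on error.
+ */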
+int qed_qm_pf_rt_init(struct qed_hwfn			*p_hwfn,
+		      struct qed_ptt			*p_ptt,
+		      struct qed_qm_pf_rt_init_params	*p_params);
+
+/**
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt	- ptt window used for writing the registers
+ * @param pf_id	- PF ID
+ * @param pf_rl	- rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn	*p_hwfn,
+		   struct qed_ptt	*p_ptt,
+		   u8			pf_id,
+		   u32			pf_rl);
+
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt		- ptt window used for writing the registers
+ * @param vport_id	- VPORT ID
+ * @param vport_rl	- rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn	*p_hwfn,
+		      struct qed_ptt	*p_ptt,
+		      u8		vport_id,
+		      u32		vport_rl);
+
+/**
+ * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt	         - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq       - true for Tx PQs, false for Other PQs.
+ * @param start_pq       - first PQ ID to stop
+ * @param num_pqs        - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if a timeout occurred while
+ *	    waiting for the QM command to complete.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn	*p_hwfn,
+			  struct qed_ptt	*p_ptt,
+			  bool			is_release_cmd,
+			  bool			is_tx_pq,
+			  u16			start_pq,
+			  u16			num_pqs);
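+
+/* Typical sequence (sketch): stop a PQ range first (is_release_cmd =
+ * false), then release it with the same arguments (is_release_cmd = true):
+ *
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq, num_pqs))
+ *		return -EBUSY;
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs))
+ *		return -EBUSY;
+ */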
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET			(IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE			(IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id)		(IRO[1].base + \
+							 ((port_id) * \
+							  IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE				(IRO[1].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id)	(IRO[2].base +	\
+							 ((vf_id) *	\
+							  IRO[2].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE			(IRO[2].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET			(IRO[3].base)
+#define USTORM_FLR_FINAL_ACK_SIZE			(IRO[3].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id)			(IRO[4].base +	\
+							 ((pf_id) *	\
+							  IRO[4].m1))
+#define USTORM_EQE_CONS_SIZE				(IRO[4].size)
+/* Ustorm Completion ring consumer */
+#define USTORM_CQ_CONS_OFFSET(global_queue_id)		(IRO[5].base +	\
+							 ((global_queue_id) * \
+							  IRO[5].m1))
+#define USTORM_CQ_CONS_SIZE				(IRO[5].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET			(IRO[6].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE			(IRO[6].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET			(IRO[7].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE			(IRO[7].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET			(IRO[8].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE			(IRO[8].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET			(IRO[9].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE			(IRO[9].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET			(IRO[10].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE			(IRO[10].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET			(IRO[11].base)
+#define USTORM_INTEG_TEST_DATA_SIZE			(IRO[11].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id)	(IRO[12].base +	\
+							 ((core_rx_queue_id) * \
+							  IRO[12].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE			(IRO[12].size)
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \
+							     ((core_rx_q_id) * \
+							      IRO[13].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE		(IRO[13].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \
+							     ((core_rx_q_id) * \
+							      IRO[14].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE		(IRO[14].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \
+							     ((core_txst_id) * \
+							      IRO[15].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE		(IRO[15].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \
+						   ((stat_counter_id) *	\
+						    IRO[16].m1))
+#define MSTORM_QUEUE_STAT_SIZE				(IRO[16].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id)			(IRO[17].base +	\
+							 ((queue_id) *	\
+							  IRO[17].m1))
+#define MSTORM_PRODS_SIZE				(IRO[17].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET			(IRO[18].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[18].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id)	(IRO[19].base +	\
+							((stat_counter_id) * \
+							 IRO[19].m1))
+#define USTORM_QUEUE_STAT_SIZE				(IRO[19].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id)		(IRO[20].base +	\
+							 ((queue_id) *	\
+							  IRO[20].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE			(IRO[20].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id)	(IRO[21].base +	\
+							 ((stat_counter_id) * \
+							  IRO[21].m1))
+#define PSTORM_QUEUE_STAT_SIZE				(IRO[21].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id)		(IRO[22].base +	\
+							 ((pf_id) *	\
+							  IRO[22].m1))
+#define TSTORM_ETH_PRS_INPUT_SIZE			(IRO[22].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id)		(IRO[23].base +	\
+							 ((queue_id) *	\
+							  IRO[23].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE			(IRO[23].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id)		(IRO[24].base +	\
+							 ((rss_id) *	\
+							  IRO[24].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE				(IRO[24].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id)		(IRO[25].base +	\
+							 ((rss_id) *	\
+							  IRO[25].m1))
+#define USTORM_TOE_CQ_PROD_SIZE				(IRO[25].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id)		(IRO[26].base +	\
+							 ((pf_id) *	\
+							  IRO[26].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE			(IRO[26].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id)	(IRO[27].base +	\
+							 ((cmdq_queue_id) * \
+							  IRO[27].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE			(IRO[27].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id)		(IRO[28].base +	\
+							 ((rq_queue_id) * \
+							  IRO[28].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE			(IRO[28].size)
+/* Pstorm RoCE statistics */
+#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id)	(IRO[29].base +	\
+							 ((stat_counter_id) * \
+							  IRO[29].m1))
+#define PSTORM_ROCE_STAT_SIZE				(IRO[29].size)
+/* Tstorm RoCE statistics */
+#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id)	(IRO[30].base +	\
+							 ((stat_counter_id) * \
+							  IRO[30].m1))
+#define TSTORM_ROCE_STAT_SIZE				(IRO[30].size)
+
+static const struct iro iro_arr[31] = {
+	{ 0x10,	  0x0,	 0x0,	0x0,   0x8     },
+	{ 0x4448, 0x60,	 0x0,	0x0,   0x60    },
+	{ 0x498,  0x8,	 0x0,	0x0,   0x4     },
+	{ 0x494,  0x0,	 0x0,	0x0,   0x4     },
+	{ 0x10,	  0x8,	 0x0,	0x0,   0x2     },
+	{ 0x90,	  0x8,	 0x0,	0x0,   0x2     },
+	{ 0x4540, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x39e0, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x2598, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x4350, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x52d0, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x7a48, 0x0,	 0x0,	0x0,   0xf8    },
+	{ 0x100,  0x8,	 0x0,	0x0,   0x8     },
+	{ 0x5808, 0x10,	 0x0,	0x0,   0x10    },
+	{ 0xb100, 0x30,	 0x0,	0x0,   0x30    },
+	{ 0x95c0, 0x30,	 0x0,	0x0,   0x30    },
+	{ 0x54f8, 0x40,	 0x0,	0x0,   0x40    },
+	{ 0x200,  0x10,	 0x0,	0x0,   0x8     },
+	{ 0x9e70, 0x0,	 0x0,	0x0,   0x4     },
+	{ 0x7ca0, 0x40,	 0x0,	0x0,   0x30    },
+	{ 0xd00,  0x8,	 0x0,	0x0,   0x8     },
+	{ 0x2790, 0x80,	 0x0,	0x0,   0x38    },
+	{ 0xa520, 0xf0,	 0x0,	0x0,   0xf0    },
+	{ 0x80,	  0x8,	 0x0,	0x0,   0x8     },
+	{ 0xac0,  0x8,	 0x0,	0x0,   0x8     },
+	{ 0x2580, 0x8,	 0x0,	0x0,   0x8     },
+	{ 0x2500, 0x8,	 0x0,	0x0,   0x8     },
+	{ 0x440,  0x8,	 0x0,	0x0,   0x2     },
+	{ 0x1800, 0x8,	 0x0,	0x0,   0x2     },
+	{ 0x27c8, 0x80,	 0x0,	0x0,   0x10    },
+	{ 0x4710, 0x10,	 0x0,	0x0,   0x10    },
+};
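+
+/* For example, TSTORM_PORT_STAT_OFFSET() above expands through IRO[1]
+ * (base 0x4448, m1 0x60), so the statistics of port 1 start at
+ * 0x4448 + 1 * 0x60 = 0x44a8 in Tstorm RAM and span 0x60 bytes.
+ */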
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET                                0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET                                1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET                                2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET                                3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET                                4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET                                5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET                                6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET                                7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET                                8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET                                9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET                                10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET                                11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET                                12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET                                13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET                                14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET                                15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET                                  16
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET                              17
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET                              18
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET                               19
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET                               20
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET                            21
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET                           22
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET                             23
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                                 760
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE                                   736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET                                1496
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE                                  736
+#define CAU_REG_PI_MEMORY_RT_OFFSET                                     2232
+#define CAU_REG_PI_MEMORY_RT_SIZE                                       4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET                    6648
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET                      6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET                      6650
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET                         6651
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET                         6652
+#define PRS_REG_SEARCH_TCP_RT_OFFSET                                    6653
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET                                   6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET                                   6655
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET                           6656
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET                           6657
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET                               6658
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET                     6659
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET           6660
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET                      6661
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET                         6662
+#define SRC_REG_FIRSTFREE_RT_OFFSET                                     6663
+#define SRC_REG_FIRSTFREE_RT_SIZE                                       2
+#define SRC_REG_LASTFREE_RT_OFFSET                                      6665
+#define SRC_REG_LASTFREE_RT_SIZE                                        2
+#define SRC_REG_COUNTFREE_RT_OFFSET                                     6667
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET                              6668
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET                                6669
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET                                6670
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET                                  6671
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET                                  6672
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                                 6673
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET                               6674
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET                                6675
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET                               6676
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET                                6677
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET                              6678
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET                               6679
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET                             6680
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET                              6681
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET                             6682
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET                              6683
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET                             6684
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET                              6685
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET                     6686
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET                   6687
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET                   6688
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET                               6689
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET                             6690
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET                             6691
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET                           6692
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET                         6693
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET                         6694
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET                                    6695
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET                                6696
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET                                    6697
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET                                    6698
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET                              6699
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET                              6700
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                                 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE                                   22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET                                   28701
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET                           28702
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET                              28703
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET                              28704
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET                              28705
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                                 28706
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                                 28707
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                                 28708
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET                     28709
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET                     28710
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET                                28711
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE                                  416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET                                29127
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE                                  512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET                                    29639
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET                                    29640
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET                                    29641
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET                               29642
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET                               29643
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET                               29644
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET                               29645
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET                               29646
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET                               29647
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET                               29648
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET                               29649
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET                               29650
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET                               29651
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET                              29652
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET                              29653
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET                              29654
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET                              29655
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET                              29656
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET                              29657
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET                              29658
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET                              29659
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET                              29660
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET                              29661
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET                              29662
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET                              29663
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET                              29664
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET                              29665
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET                              29666
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET                              29667
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET                              29668
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET                              29669
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET                              29670
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET                              29671
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET                              29672
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET                              29673
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET                              29674
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET                              29675
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET                              29676
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET                              29677
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET                              29678
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET                              29679
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET                              29680
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET                              29681
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET                              29682
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET                              29683
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET                              29684
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET                              29685
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET                              29686
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET                              29687
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET                              29688
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET                              29689
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET                              29690
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET                              29691
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET                              29692
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET                              29693
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET                              29694
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET                              29695
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET                              29696
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET                              29697
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET                              29698
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET                              29699
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET                              29700
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET                              29701
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET                              29702
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET                              29703
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET                              29704
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET                              29705
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET                                29706
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE                                  128
+#define QM_REG_VOQCRDLINE_RT_OFFSET                                     29834
+#define QM_REG_VOQCRDLINE_RT_SIZE                                       20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                                 29854
+#define QM_REG_VOQINITCRDLINE_RT_SIZE                                   20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET                             29874
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET                             29875
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET                              29876
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET                            29877
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET                           29878
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET                                29879
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET                                29880
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET                                29881
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET                                29882
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET                                29883
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET                                29884
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET                                29885
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET                                29886
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET                                29887
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET                                29888
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET                               29889
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET                               29890
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET                               29891
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET                               29892
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET                               29893
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET                               29894
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET                            29895
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET                            29896
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET                            29897
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET                            29898
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET                               29899
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET                               29900
+#define QM_REG_PQTX2PF_0_RT_OFFSET                                      29901
+#define QM_REG_PQTX2PF_1_RT_OFFSET                                      29902
+#define QM_REG_PQTX2PF_2_RT_OFFSET                                      29903
+#define QM_REG_PQTX2PF_3_RT_OFFSET                                      29904
+#define QM_REG_PQTX2PF_4_RT_OFFSET                                      29905
+#define QM_REG_PQTX2PF_5_RT_OFFSET                                      29906
+#define QM_REG_PQTX2PF_6_RT_OFFSET                                      29907
+#define QM_REG_PQTX2PF_7_RT_OFFSET                                      29908
+#define QM_REG_PQTX2PF_8_RT_OFFSET                                      29909
+#define QM_REG_PQTX2PF_9_RT_OFFSET                                      29910
+#define QM_REG_PQTX2PF_10_RT_OFFSET                                     29911
+#define QM_REG_PQTX2PF_11_RT_OFFSET                                     29912
+#define QM_REG_PQTX2PF_12_RT_OFFSET                                     29913
+#define QM_REG_PQTX2PF_13_RT_OFFSET                                     29914
+#define QM_REG_PQTX2PF_14_RT_OFFSET                                     29915
+#define QM_REG_PQTX2PF_15_RT_OFFSET                                     29916
+#define QM_REG_PQTX2PF_16_RT_OFFSET                                     29917
+#define QM_REG_PQTX2PF_17_RT_OFFSET                                     29918
+#define QM_REG_PQTX2PF_18_RT_OFFSET                                     29919
+#define QM_REG_PQTX2PF_19_RT_OFFSET                                     29920
+#define QM_REG_PQTX2PF_20_RT_OFFSET                                     29921
+#define QM_REG_PQTX2PF_21_RT_OFFSET                                     29922
+#define QM_REG_PQTX2PF_22_RT_OFFSET                                     29923
+#define QM_REG_PQTX2PF_23_RT_OFFSET                                     29924
+#define QM_REG_PQTX2PF_24_RT_OFFSET                                     29925
+#define QM_REG_PQTX2PF_25_RT_OFFSET                                     29926
+#define QM_REG_PQTX2PF_26_RT_OFFSET                                     29927
+#define QM_REG_PQTX2PF_27_RT_OFFSET                                     29928
+#define QM_REG_PQTX2PF_28_RT_OFFSET                                     29929
+#define QM_REG_PQTX2PF_29_RT_OFFSET                                     29930
+#define QM_REG_PQTX2PF_30_RT_OFFSET                                     29931
+#define QM_REG_PQTX2PF_31_RT_OFFSET                                     29932
+#define QM_REG_PQTX2PF_32_RT_OFFSET                                     29933
+#define QM_REG_PQTX2PF_33_RT_OFFSET                                     29934
+#define QM_REG_PQTX2PF_34_RT_OFFSET                                     29935
+#define QM_REG_PQTX2PF_35_RT_OFFSET                                     29936
+#define QM_REG_PQTX2PF_36_RT_OFFSET                                     29937
+#define QM_REG_PQTX2PF_37_RT_OFFSET                                     29938
+#define QM_REG_PQTX2PF_38_RT_OFFSET                                     29939
+#define QM_REG_PQTX2PF_39_RT_OFFSET                                     29940
+#define QM_REG_PQTX2PF_40_RT_OFFSET                                     29941
+#define QM_REG_PQTX2PF_41_RT_OFFSET                                     29942
+#define QM_REG_PQTX2PF_42_RT_OFFSET                                     29943
+#define QM_REG_PQTX2PF_43_RT_OFFSET                                     29944
+#define QM_REG_PQTX2PF_44_RT_OFFSET                                     29945
+#define QM_REG_PQTX2PF_45_RT_OFFSET                                     29946
+#define QM_REG_PQTX2PF_46_RT_OFFSET                                     29947
+#define QM_REG_PQTX2PF_47_RT_OFFSET                                     29948
+#define QM_REG_PQTX2PF_48_RT_OFFSET                                     29949
+#define QM_REG_PQTX2PF_49_RT_OFFSET                                     29950
+#define QM_REG_PQTX2PF_50_RT_OFFSET                                     29951
+#define QM_REG_PQTX2PF_51_RT_OFFSET                                     29952
+#define QM_REG_PQTX2PF_52_RT_OFFSET                                     29953
+#define QM_REG_PQTX2PF_53_RT_OFFSET                                     29954
+#define QM_REG_PQTX2PF_54_RT_OFFSET                                     29955
+#define QM_REG_PQTX2PF_55_RT_OFFSET                                     29956
+#define QM_REG_PQTX2PF_56_RT_OFFSET                                     29957
+#define QM_REG_PQTX2PF_57_RT_OFFSET                                     29958
+#define QM_REG_PQTX2PF_58_RT_OFFSET                                     29959
+#define QM_REG_PQTX2PF_59_RT_OFFSET                                     29960
+#define QM_REG_PQTX2PF_60_RT_OFFSET                                     29961
+#define QM_REG_PQTX2PF_61_RT_OFFSET                                     29962
+#define QM_REG_PQTX2PF_62_RT_OFFSET                                     29963
+#define QM_REG_PQTX2PF_63_RT_OFFSET                                     29964
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET                                   29965
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET                                   29966
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET                                   29967
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET                                   29968
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET                                   29969
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET                                   29970
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET                                   29971
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET                                   29972
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET                                   29973
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET                                   29974
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET                                  29975
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET                                  29976
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET                                  29977
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET                                  29978
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET                                  29979
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET                                  29980
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                                 29981
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                                 29982
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET                            29983
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET                            29984
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET                              29985
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET                              29986
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET                              29987
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET                              29988
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET                              29989
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET                              29990
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET                              29991
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET                              29992
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET                                   29993
+#define QM_REG_RLGLBLINCVAL_RT_SIZE                                     256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET                               30249
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                                 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET                                      30505
+#define QM_REG_RLGLBLCRD_RT_SIZE                                        256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET                                   30761
+#define QM_REG_RLPFPERIOD_RT_OFFSET                                     30762
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET                                30763
+#define QM_REG_RLPFINCVAL_RT_OFFSET                                     30764
+#define QM_REG_RLPFINCVAL_RT_SIZE                                       16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                                 30780
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE                                   16
+#define QM_REG_RLPFCRD_RT_OFFSET                                        30796
+#define QM_REG_RLPFCRD_RT_SIZE                                          16
+#define QM_REG_RLPFENABLE_RT_OFFSET                                     30812
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET                                  30813
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET                                    30814
+#define QM_REG_WFQPFWEIGHT_RT_SIZE                                      16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET                                30830
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE                                  16
+#define QM_REG_WFQPFCRD_RT_OFFSET                                       30846
+#define QM_REG_WFQPFCRD_RT_SIZE                                         160
+#define QM_REG_WFQPFENABLE_RT_OFFSET                                    31006
+#define QM_REG_WFQVPENABLE_RT_OFFSET                                    31007
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET                                   31008
+#define QM_REG_BASEADDRTXPQ_RT_SIZE                                     512
+#define QM_REG_TXPQMAP_RT_OFFSET                                        31520
+#define QM_REG_TXPQMAP_RT_SIZE                                          512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET                                    32032
+#define QM_REG_WFQVPWEIGHT_RT_SIZE                                      512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET                                32544
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE                                  512
+#define QM_REG_WFQVPCRD_RT_OFFSET                                       33056
+#define QM_REG_WFQVPCRD_RT_SIZE                                         512
+#define QM_REG_WFQVPMAP_RT_OFFSET                                       33568
+#define QM_REG_WFQVPMAP_RT_SIZE                                         512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET                                   34080
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE                                     160
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET                         34240
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET                         34241
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET                         34242
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET                         34243
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET                         34244
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET                          34245
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET                      34246
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET                               34247
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                                 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET                          34251
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE                            4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET                            34255
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE                              4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET                               34259
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET                         34260
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE                           32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET                            34292
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE                              16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET                          34308
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE                            16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                 34324
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE                   16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET                       34340
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE                         16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET                                  34356
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET                               34357
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET                               34358
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET                               34359
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET                           34360
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET                           34361
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET                           34362
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET                           34363
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET                        34364
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET                        34365
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET                        34366
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET                        34367
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET                            34368
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET                         34369
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET                          34370
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET                        34371
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET                           34372
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET                    34373
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET                        34374
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET                           34375
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET                    34376
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET                        34377
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET                           34378
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET                    34379
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET                        34380
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET                           34381
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET                    34382
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET                        34383
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET                           34384
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET                    34385
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET                        34386
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET                           34387
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET                    34388
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET                        34389
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET                           34390
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET                    34391
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET                        34392
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET                           34393
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET                    34394
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET                        34395
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET                           34396
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET                    34397
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET                        34398
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET                           34399
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET                    34400
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET                       34401
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET                          34402
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET                   34403
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET                       34404
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET                          34405
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET                   34406
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET                       34407
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET                          34408
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET                   34409
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET                       34410
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET                          34411
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET                   34412
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET                       34413
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET                          34414
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET                   34415
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET                       34416
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET                          34417
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET                   34418
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET                       34419
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET                          34420
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET                   34421
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET                       34422
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET                          34423
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET                   34424
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET                       34425
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET                          34426
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET                   34427
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET                       34428
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET                          34429
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET                   34430
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET                                    34431
+
+#define RUNTIME_ARRAY_SIZE 34432
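+
+/* The *_RT_OFFSET values above index the host-side runtime init array of
+ * RUNTIME_ARRAY_SIZE entries (one per register value); e.g., assuming an
+ * rt_data array of that size in the init code:
+ *
+ *	rt_data[DORQ_REG_PF_WAKE_ALL_RT_OFFSET] = 1;
+ */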
+
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+	__le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+	u8	reserved0 /* cdu_validation */;
+	u8	eth_state /* state */;
+	u8	flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1 /* bit4 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1 /* bit6 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1 /* bit7 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1 /* bit8 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1 /* bit9 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1 /* bit10 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1 /* bit11 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK                   0x1 /* bit12 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK                   0x1 /* bit13 */
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1 /* bit14 */
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1 /* bit15 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3 /* timer0cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3 /* timer1cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3 /* timer2cf */
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3 /* cf4 */
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3 /* cf5 */
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3 /* cf6 */
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3 /* cf7 */
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3 /* cf8 */
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3 /* cf9 */
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3 /* cf10 */
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3 /* cf11 */
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3 /* cf12 */
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3 /* cf13 */
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3 /* cf14 */
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3 /* cf15 */
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3 /* cf16 */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3 /* cf18 */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3 /* cf19 */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3 /* cf20 */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3 /* cf21 */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3 /* cf22 */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1 /* cf0en */
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1 /* cf1en */
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1 /* cf2en */
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1 /* cf3en */
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1 /* cf4en */
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1 /* cf5en */
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1 /* cf6en */
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1 /* cf7en */
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1 /* cf8en */
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1 /* cf9en */
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1 /* cf10en */
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1 /* cf11en */
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1 /* cf12en */
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1 /* cf13en */
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1 /* cf14en */
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1 /* cf15en */
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1 /* cf16en */
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1 /* cf18en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1 /* cf19en */
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1 /* cf20en */
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1 /* cf21en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1 /* cf22en */
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1 /* rule0en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1 /* rule1en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1 /* rule2en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1 /* rule3en */
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1 /* rule4en */
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1 /* rule5en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1 /* rule6en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1 /* rule7en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1 /* rule8en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1 /* rule9en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1 /* rule10en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1 /* rule11en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1 /* rule12en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1 /* rule13en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1 /* rule14en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1 /* rule15en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1 /* rule16en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1 /* rule17en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1 /* rule18en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1 /* rule19en */
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1 /* rule20en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1 /* rule21en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1 /* rule22en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1 /* rule23en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1 /* rule24en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1 /* rule25en */
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1 /* bit16 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1 /* bit17 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1 /* bit18 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1 /* bit19 */
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1 /* bit20 */
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1 /* bit21 */
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3 /* cf23 */
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+	u8	edpm_event_id /* byte2 */;
+	__le16	physical_q0 /* physical_q0 */;
+	__le16	word1 /* physical_q1 */;
+	__le16	edpm_num_bds /* physical_q2 */;
+	__le16	tx_bd_cons /* word3 */;
+	__le16	tx_bd_prod /* word4 */;
+	__le16	go_to_bd_cons /* word5 */;
+	__le16	conn_dpi /* conn_dpi */;
+	u8	byte3 /* byte3 */;
+	u8	byte4 /* byte4 */;
+	u8	byte5 /* byte5 */;
+	u8	byte6 /* byte6 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le32	reg4 /* reg4 */;
+	__le32	reg5 /* cf_array0 */;
+	__le32	reg6 /* cf_array1 */;
+	__le16	word7 /* word7 */;
+	__le16	word8 /* word8 */;
+	__le16	word9 /* word9 */;
+	__le16	word10 /* word10 */;
+	__le32	reg7 /* reg7 */;
+	__le32	reg8 /* reg8 */;
+	__le32	reg9 /* reg9 */;
+	u8	byte7 /* byte7 */;
+	u8	byte8 /* byte8 */;
+	u8	byte9 /* byte9 */;
+	u8	byte10 /* byte10 */;
+	u8	byte11 /* byte11 */;
+	u8	byte12 /* byte12 */;
+	u8	byte13 /* byte13 */;
+	u8	byte14 /* byte14 */;
+	u8	byte15 /* byte15 */;
+	u8	byte16 /* byte16 */;
+	__le16	word11 /* word11 */;
+	__le32	reg10 /* reg10 */;
+	__le32	reg11 /* reg11 */;
+	__le32	reg12 /* reg12 */;
+	__le32	reg13 /* reg13 */;
+	__le32	reg14 /* reg14 */;
+	__le32	reg15 /* reg15 */;
+	__le32	reg16 /* reg16 */;
+	__le32	reg17 /* reg17 */;
+	__le32	reg18 /* reg18 */;
+	__le32	reg19 /* reg19 */;
+	__le16	word12 /* word12 */;
+	__le16	word13 /* word13 */;
+	__le16	word14 /* word14 */;
+	__le16	word15 /* word15 */;
+};
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+	__le32 reserved[4];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+	__le32 reserved[8];
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+	__le32 reserved[40];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+	struct ystorm_eth_conn_st_ctx	ystorm_st_context;
+	struct regpair			ystorm_st_padding[2] /* padding */;
+	struct pstorm_eth_conn_st_ctx	pstorm_st_context;
+	struct regpair			pstorm_st_padding[2] /* padding */;
+	struct xstorm_eth_conn_st_ctx	xstorm_st_context;
+	struct xstorm_eth_conn_ag_ctx	xstorm_ag_context;
+	struct tstorm_eth_conn_st_ctx	tstorm_st_context;
+	struct regpair			tstorm_st_padding[2] /* padding */;
+	struct mstorm_eth_conn_st_ctx	mstorm_st_context;
+	struct ustorm_eth_conn_st_ctx	ustorm_st_context;
+};
+
+enum eth_filter_action {
+	ETH_FILTER_ACTION_REMOVE,
+	ETH_FILTER_ACTION_ADD,
+	ETH_FILTER_ACTION_REPLACE,
+	MAX_ETH_FILTER_ACTION
+};
+
+struct eth_filter_cmd {
+	u8      type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+	u8      vport_id /* the vport id */;
+	u8      action /* filter command action: add/remove/replace */;
+	u8      reserved0;
+	__le32  vni;
+	__le16  mac_lsb;
+	__le16  mac_mid;
+	__le16  mac_msb;
+	__le16  vlan_id;
+};
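+
+/* Editorial sketch (not part of the HSI): one plausible way to split a
+ * 6-byte MAC address across the three __le16 words above. The msb/mid/lsb
+ * byte ordering shown here is an assumption for illustration only.
+ */
+static inline void example_fill_filter_mac(struct eth_filter_cmd *cmd,
+					   const u8 *mac)
+{
+	cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
+	cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
+	cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
+}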
+
+struct eth_filter_cmd_header {
+	u8      rx;
+	u8      tx;
+	u8      cmd_cnt;
+	u8      assert_on_error;
+	u8      reserved1[4];
+};
+
+enum eth_filter_type {
+	ETH_FILTER_TYPE_MAC,
+	ETH_FILTER_TYPE_VLAN,
+	ETH_FILTER_TYPE_PAIR,
+	ETH_FILTER_TYPE_INNER_MAC,
+	ETH_FILTER_TYPE_INNER_VLAN,
+	ETH_FILTER_TYPE_INNER_PAIR,
+	ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+	ETH_FILTER_TYPE_MAC_VNI_PAIR,
+	ETH_FILTER_TYPE_VNI,
+	MAX_ETH_FILTER_TYPE
+};
+
+enum eth_ramrod_cmd_id {
+	ETH_RAMROD_UNUSED,
+	ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
+	ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
+	ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
+	ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
+	ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
+	ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
+	ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+	ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
+	ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
+	ETH_RAMROD_RESERVED,
+	ETH_RAMROD_RESERVED2,
+	ETH_RAMROD_RESERVED3,
+	ETH_RAMROD_RESERVED4,
+	ETH_RAMROD_RESERVED5,
+	ETH_RAMROD_RESERVED6,
+	ETH_RAMROD_RESERVED7,
+	ETH_RAMROD_RESERVED8,
+	MAX_ETH_RAMROD_CMD_ID
+};
+
+struct eth_vport_rss_config {
+	__le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK	0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT       0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK	0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT       1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT   2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT   3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT   4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT   5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK  0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK     0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT    7
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK     0x1
+#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT    8
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x7F
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT             9
+	u8      rss_id;
+	u8      rss_mode;
+	u8      update_rss_key;
+	u8      update_rss_ind_table;
+	u8      update_rss_capabilities;
+	u8      tbl_size;
+	__le32  reserved2[2];
+	__le16  indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+	__le32  rss_key[ETH_RSS_KEY_SIZE_REGS];
+	__le32  reserved3[2];
+};
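+
+/* Editorial sketch (not part of the HSI): the capabilities word above is
+ * composed with the mask/shift idiom used throughout this file. Here we
+ * enable IPv4 and IPv4-TCP hashing (field value 1 for each 1-bit field).
+ */
+static inline __le16 example_rss_caps_ipv4_tcp(void)
+{
+	u16 caps = 0;
+
+	caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT;
+	caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT;
+	return cpu_to_le16(caps);
+}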
+
+enum eth_vport_rss_mode {
+	ETH_VPORT_RSS_MODE_DISABLED,
+	ETH_VPORT_RSS_MODE_REGULAR,
+	MAX_ETH_VPORT_RSS_MODE
+};
+
+struct eth_vport_rx_mode {
+	__le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              6
+	__le16 reserved2[3];
+};
+
+struct eth_vport_tpa_param {
+	u64     reserved[2];
+};
+
+struct eth_vport_tx_mode {
+	__le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+	__le16 reserved2[3];
+};
+
+struct rx_queue_start_ramrod_data {
+	__le16	  rx_queue_id;
+	__le16	  num_of_pbl_pages;
+	__le16	  bd_max_bytes;
+	__le16	  sb_id;
+	u8	      sb_index;
+	u8	      vport_id;
+	u8	      default_rss_queue_flg;
+	u8	      complete_cqe_flg;
+	u8	      complete_event_flg;
+	u8	      stats_counter_id;
+	u8	      pin_context;
+	u8	      pxp_tph_valid_bd;
+	u8	      pxp_tph_valid_pkt;
+	u8	      pxp_st_hint;
+	__le16	  pxp_st_index;
+	u8	      reserved[4];
+	struct regpair  cqe_pbl_addr;
+	struct regpair  bd_base;
+	struct regpair  sge_base;
+};
+
+struct rx_queue_stop_ramrod_data {
+	__le16  rx_queue_id;
+	u8      complete_cqe_flg;
+	u8      complete_event_flg;
+	u8      vport_id;
+	u8      reserved[3];
+};
+
+struct rx_queue_update_ramrod_data {
+	__le16	  rx_queue_id;
+	u8	      complete_cqe_flg;
+	u8	      complete_event_flg;
+	u8	      init_sge_ring_flg;
+	u8	      vport_id;
+	u8	      pxp_tph_valid_sge;
+	u8	      pxp_st_hint;
+	__le16	  pxp_st_index;
+	u8	      reserved[6];
+	struct regpair  sge_base;
+};
+
+struct tx_queue_start_ramrod_data {
+	__le16  sb_id;
+	u8      sb_index;
+	u8      vport_id;
+	u8      tc;
+	u8      stats_counter_id;
+	__le16  qm_pq_id;
+	u8      flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK  0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK              0x1F
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT             3
+	u8	      pin_context;
+	u8	      pxp_tph_valid_bd;
+	u8	      pxp_tph_valid_pkt;
+	__le16	  pxp_st_index;
+	u8	      pxp_st_hint;
+	u8	      reserved1[3];
+	__le16	  queue_zone_id;
+	__le16	  test_dup_count;
+	__le16	  pbl_size;
+	struct regpair  pbl_base_addr;
+};
+
+struct tx_queue_stop_ramrod_data {
+	__le16 reserved[4];
+};
+
+struct vport_filter_update_ramrod_data {
+	struct eth_filter_cmd_header    filter_cmd_hdr;
+	struct eth_filter_cmd	   filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+struct vport_start_ramrod_data {
+	u8			      vport_id;
+	u8			      sw_fid;
+	__le16			  mtu;
+	u8			      drop_ttl0_en;
+	u8			      inner_vlan_removal_en;
+	struct eth_vport_rx_mode	rx_mode;
+	struct eth_vport_tx_mode	tx_mode;
+	struct eth_vport_tpa_param      tpa_param;
+	__le16			  sge_buff_size;
+	u8			      max_sges_num;
+	u8			      tx_switching_en;
+	u8			      anti_spoofing_en;
+	u8			      default_vlan_en;
+	u8			      handle_ptp_pkts;
+	u8			      silent_vlan_removal_en;
+	__le16			  default_vlan;
+	u8			      untagged;
+	u8			      reserved[7];
+};
+
+struct vport_stop_ramrod_data {
+	u8      vport_id;
+	u8      reserved[7];
+};
+
+struct vport_update_ramrod_data_cmn {
+	u8      vport_id;
+	u8      update_rx_active_flg;
+	u8      rx_active_flg;
+	u8      update_tx_active_flg;
+	u8      tx_active_flg;
+	u8      update_rx_mode_flg;
+	u8      update_tx_mode_flg;
+	u8      update_approx_mcast_flg;
+	u8      update_rss_flg;
+	u8      update_inner_vlan_removal_en_flg;
+	u8      inner_vlan_removal_en;
+	u8      update_tpa_param_flg;
+	u8      update_tpa_en_flg;
+	u8      update_sge_param_flg;
+	__le16  sge_buff_size;
+	u8      max_sges_num;
+	u8      update_tx_switching_en_flg;
+	u8      tx_switching_en;
+	u8      update_anti_spoofing_en_flg;
+	u8      anti_spoofing_en;
+	u8      update_handle_ptp_pkts;
+	u8      handle_ptp_pkts;
+	u8      update_default_vlan_en_flg;
+	u8      default_vlan_en;
+	u8      update_default_vlan_flg;
+	__le16  default_vlan;
+	u8      update_accept_any_vlan_flg;
+	u8      accept_any_vlan;
+	u8      silent_vlan_removal_en;
+	u8      reserved;
+};
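+
+/* Editorial sketch (not part of the HSI): the common section pairs each
+ * settable attribute with an update_*_flg byte, which evidently gates
+ * which attributes the firmware applies. For example, requesting an
+ * inner-VLAN-removal change:
+ */
+static inline void
+example_request_inner_vlan_removal(struct vport_update_ramrod_data_cmn *cmn,
+				   u8 enable)
+{
+	cmn->update_inner_vlan_removal_en_flg = 1;
+	cmn->inner_vlan_removal_en = enable;
+}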
+
+struct vport_update_ramrod_mcast {
+	__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+struct vport_update_ramrod_data {
+	struct vport_update_ramrod_data_cmn     common;
+	struct eth_vport_rx_mode		rx_mode;
+	struct eth_vport_tx_mode		tx_mode;
+	struct eth_vport_tpa_param	      tpa_param;
+	struct vport_update_ramrod_mcast	approx_mcast;
+	struct eth_vport_rss_config	     rss_config;
+};
+
+struct mstorm_eth_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1   /* exist_in_qm0 */
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1   /* exist_in_qm1 */
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3   /* cf0 */
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3   /* cf1 */
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3   /* cf2 */
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+	u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1   /* cf0en */
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1   /* cf1en */
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1   /* cf2en */
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1   /* rule0en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1   /* rule1en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1   /* rule2en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1   /* rule3en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1   /* rule4en */
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+	__le16	word0 /* word0 */;
+	__le16	word1 /* word1 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1       /* exist_in_qm0 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1       /* exist_in_qm1 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1       /* bit2 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1       /* bit3 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1       /* bit4 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1       /* bit5 */
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3       /* timer0cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
+	u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3       /* timer1cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3       /* timer2cf */
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3       /* timer_stop_all */
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3       /* cf4 */
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
+	u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3       /* cf5 */
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3       /* cf6 */
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3       /* cf7 */
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3       /* cf8 */
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
+	u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3       /* cf9 */
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3       /* cf10 */
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1       /* cf0en */
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1       /* cf1en */
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1       /* cf2en */
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1       /* cf3en */
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
+	u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1       /* cf4en */
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1       /* cf5en */
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1       /* cf6en */
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1       /* cf7en */
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1       /* cf8en */
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1       /* cf9en */
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1       /* cf10en */
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1       /* rule0en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
+	u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1       /* rule1en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1       /* rule2en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1       /* rule3en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1       /* rule4en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1       /* rule5en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1       /* rule6en */
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1       /* rule7en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1       /* rule8en */
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le32	reg4 /* reg4 */;
+	__le32	reg5 /* reg5 */;
+	__le32	reg6 /* reg6 */;
+	__le32	reg7 /* reg7 */;
+	__le32	reg8 /* reg8 */;
+	u8	byte2 /* byte2 */;
+	u8	byte3 /* byte3 */;
+	__le16	rx_bd_cons /* word0 */;
+	u8	byte4 /* byte4 */;
+	u8	byte5 /* byte5 */;
+	__le16	rx_bd_prod /* word1 */;
+	__le16	word2 /* conn_dpi */;
+	__le16	word3 /* word3 */;
+	__le32	reg9 /* reg9 */;
+	__le32	reg10 /* reg10 */;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+	u8	byte0 /* cdu_validation */;
+	u8	byte1 /* state */;
+	u8	flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define USTORM_ETH_CONN_AG_CTX_CF0_MASK                   0x3   /* timer0cf */
+#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT                  2
+#define USTORM_ETH_CONN_AG_CTX_CF1_MASK                   0x3   /* timer1cf */
+#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT                  4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3   /* timer2cf */
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+	u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                   0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                  0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK             0x3   /* cf4 */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT            2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK             0x3   /* cf5 */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT            4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3   /* cf6 */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    6
+	u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK                 0x1   /* cf0en */
+#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                0
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK                 0x1   /* cf1en */
+#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1   /* cf2en */
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                 0x1   /* cf3en */
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK          0x1   /* cf4en */
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT         4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK          0x1   /* cf5en */
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT         5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1   /* cf6en */
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1   /* rule0en */
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              7
+	u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1   /* rule1en */
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1   /* rule2en */
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1   /* rule3en */
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1   /* rule4en */
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK               0x1   /* rule5en */
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT              4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK               0x1   /* rule6en */
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT              5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK               0x1   /* rule7en */
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT              6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK               0x1   /* rule8en */
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT              7
+	u8	byte2 /* byte2 */;
+	u8	byte3 /* byte3 */;
+	__le16	word0 /* conn_dpi */;
+	__le16	tx_bd_cons /* word1 */;
+	__le32	reg0 /* reg0 */;
+	__le32	reg1 /* reg1 */;
+	__le32	reg2 /* reg2 */;
+	__le32	reg3 /* reg3 */;
+	__le16	tx_drv_bd_cons /* word2 */;
+	__le16	rx_drv_cqe_cons /* word3 */;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+	u8	reserved0 /* cdu_validation */;
+	u8	eth_state /* state */;
+	u8	flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+	u8	edpm_event_id /* byte2 */;
+	__le16	physical_q0 /* physical_q0 */;
+	__le16	word1 /* physical_q1 */;
+	__le16	edpm_num_bds /* physical_q2 */;
+	__le16	tx_bd_cons /* word3 */;
+	__le16	tx_bd_prod /* word4 */;
+	__le16	go_to_bd_cons /* word5 */;
+	__le16	conn_dpi /* conn_dpi */;
+};
+
+#define VF_MAX_STATIC 192       /* In case of K2 */
+
+#define MCP_GLOB_PATH_MAX       2
+#define MCP_PORT_MAX            2       /* Global */
+#define MCP_GLOB_PORT_MAX       4       /* Global */
+#define MCP_GLOB_FUNC_MAX       16      /* Global */
+
+typedef u32 offsize_t;                  /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT    0
+#define OFFSIZE_OFFSET_MASK     0x0000ffff
+/* Size of a specific element (not of the whole array, if any) */
+#define OFFSIZE_SIZE_SHIFT      16
+#define OFFSIZE_SIZE_MASK       0xffff0000
+
+/* SECTION_OFFSET calculates the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize)        ((((_offsize &		    \
+					    OFFSIZE_OFFSET_MASK) >> \
+					   OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* QED_SECTION_SIZE calculates the size in bytes out of offsize */
+#define QED_SECTION_SIZE(_offsize)              (((_offsize &		 \
+						   OFFSIZE_SIZE_MASK) >> \
+						  OFFSIZE_SIZE_SHIFT) << 2)
+
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section.
+ */
+#define SECTION_ADDR(_offsize, idx)     (MCP_REG_SCRATCH +	    \
+					 SECTION_OFFSET(_offsize) + \
+					 (QED_SECTION_SIZE(_offsize) * idx))
+
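+/* Editorial sketch (not part of the HSI): decoding a raw offsize word
+ * read from the scratchpad with the macros above. Offset and size are
+ * stored in dwords, hence the "<< 2" conversions to bytes.
+ */
+static inline void example_decode_offsize(offsize_t offsize,
+					  u32 *offset_bytes, u32 *size_bytes)
+{
+	*offset_bytes = SECTION_OFFSET(offsize);
+	*size_bytes = QED_SECTION_SIZE(offsize);
+}
+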
+/* SECTION_OFFSIZE_ADDR returns the GRC addr of the offsize address.
+ * Use offsetof, since OFFSETUP collides with the firmware definition.
+ */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base +		     \
+						   offsetof(struct	     \
+							    mcp_public_data, \
+							    sections[_section]))
+/* PHY configuration */
+struct pmm_phy_cfg {
+	u32	speed;
+#define PMM_SPEED_AUTONEG   0
+
+	u32	pause;  /* bitmask */
+#define PMM_PAUSE_NONE          0x0
+#define PMM_PAUSE_AUTONEG       0x1
+#define PMM_PAUSE_RX            0x2
+#define PMM_PAUSE_TX            0x4
+
+	u32	adv_speed;  /* Default should be the speed_cap_mask */
+	u32	loopback_mode;
+#define PMM_LOOPBACK_NONE               0
+#define PMM_LOOPBACK_INT_PHY    1
+#define PMM_LOOPBACK_EXT_PHY    2
+#define PMM_LOOPBACK_EXT                3
+#define PMM_LOOPBACK_MAC                4
+
+	/* features */
+	u32 feature_config_flags;
+};
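+
+/* Editorial sketch (not part of the HSI): a plausible autoneg request
+ * using the constants above; speed_cap is a caller-supplied capability
+ * mask (the adv_speed comment suggests defaulting to speed_cap_mask).
+ */
+static inline void example_phy_cfg_autoneg(struct pmm_phy_cfg *cfg,
+					   u32 speed_cap)
+{
+	cfg->speed = PMM_SPEED_AUTONEG;
+	cfg->pause = PMM_PAUSE_AUTONEG;
+	cfg->adv_speed = speed_cap;
+	cfg->loopback_mode = PMM_LOOPBACK_NONE;
+}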
+
+struct port_mf_cfg {
+	u32	dynamic_cfg; /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK              0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT             0
+#define PORT_MF_CFG_OV_TAG_DEFAULT         PORT_MF_CFG_OV_TAG_MASK
+
+	u32	reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ */
+struct pmm_stats {
+	u64	r64;    /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+	u64	r127;   /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+	u64	r255;
+	u64	r511;
+	u64	r1023;
+	u64	r1518;
+	u64	r1522;
+	u64	r2047;
+	u64	r4095;
+	u64	r9216;
+	u64	r16383;
+	u64	rfcs;   /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+	u64	rxcf;   /* 0x10 (Offset 0x60 ) RX control frame counter*/
+	u64	rxpf;   /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+	u64	rxpp;   /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+	u64	raln;   /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+	u64	rfcr;   /* 0x19 (Offset 0x80 ) RX false carrier counter */
+	u64	rovr;   /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+	u64	rjbr;   /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+	u64	rund;   /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+	u64	rfrg;   /* 0x35 (Offset 0xa0 ) RX fragment counter */
+	u64	t64;    /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+	u64	t127;
+	u64	t255;
+	u64	t511;
+	u64	t1023;
+	u64	t1518;
+	u64	t2047;
+	u64	t4095;
+	u64	t9216;
+	u64	t16383;
+	u64	txpf;   /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+	u64	txpp;   /* 0x51 (Offset 0x100) TX PFC frame counter */
+	u64	tlpiec;
+	u64	tncl;
+	u64	rbyte;  /* 0x3d (Offset 0x118) RX byte counter */
+	u64	rxuca;  /* 0x0c (Offset 0x120) RX UC frame counter */
+	u64	rxmca;  /* 0x0d (Offset 0x128) RX MC frame counter */
+	u64	rxbca;  /* 0x0e (Offset 0x130) RX BC frame counter */
+	u64	rxpok;
+	u64	tbyte;  /* 0x6f (Offset 0x140) TX byte counter */
+	u64	txuca;  /* 0x4d (Offset 0x148) TX UC frame counter */
+	u64	txmca;  /* 0x4e (Offset 0x150) TX MC frame counter */
+	u64	txbca;  /* 0x4f (Offset 0x158) TX BC frame counter */
+	u64	txcf;   /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+struct brb_stats {
+	u64	brb_truncate[8];
+	u64	brb_discard[8];
+};
+
+struct port_stats {
+	struct brb_stats	brb;
+	struct pmm_stats	pmm;
+};
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+struct couple_mode_teaming {
+	u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM            BIT(0)
+
+#define PORT_CMT_PORT_ROLE          BIT(1)
+#define PORT_CMT_PORT_INACTIVE      (0 << 1)
+#define PORT_CMT_PORT_ACTIVE        BIT(1)
+
+#define PORT_CMT_TEAM_MASK          BIT(2)
+#define PORT_CMT_TEAM0              (0 << 2)
+#define PORT_CMT_TEAM1              BIT(2)
+};
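+
+/* Editorial sketch (not part of the HSI): reading the per-port CMT flags
+ * above, e.g. whether a given port participates in a team.
+ */
+static inline bool example_port_in_team(const struct couple_mode_teaming *cmt,
+					u8 port)
+{
+	return cmt->port_cmt[port] & PORT_CMT_IN_TEAM;
+}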
+
+/**************************************
+*     LLDP and DCBX HSI structures
+**************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL           32
+#define MAX_SYSTEM_LLDP_TLV_DATA    32
+
+enum lldp_agent_e {
+	LLDP_NEAREST_BRIDGE = 0,
+	LLDP_NEAREST_NON_TPMR_BRIDGE,
+	LLDP_NEAREST_CUSTOMER_BRIDGE,
+	LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+	u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK        0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT       0
+#define LLDP_CONFIG_HOLD_MASK               0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT              8
+#define LLDP_CONFIG_MAX_CREDIT_MASK         0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT        12
+#define LLDP_CONFIG_ENABLE_RX_MASK          0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT         30
+#define LLDP_CONFIG_ENABLE_TX_MASK          0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT         31
+	u32	local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+	u32	local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+	u32	prefix_seq_num;
+	u32	status; /* TBD */
+
+	/* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+	u32	peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+
+	/* Holds remote Port ID TLV header, subtype and 9B of payload. */
+	u32	peer_port_id[LLDP_PORT_ID_STAT_LEN];
+	u32	suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+	u32 flags;
+#define DCBX_ETS_ENABLED_MASK                   0x00000001
+#define DCBX_ETS_ENABLED_SHIFT                  0
+#define DCBX_ETS_WILLING_MASK                   0x00000002
+#define DCBX_ETS_WILLING_SHIFT                  1
+#define DCBX_ETS_ERROR_MASK                     0x00000004
+#define DCBX_ETS_ERROR_SHIFT                    2
+#define DCBX_ETS_CBS_MASK                       0x00000008
+#define DCBX_ETS_CBS_SHIFT                      3
+#define DCBX_ETS_MAX_TCS_MASK                   0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT                  4
+	u32	pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC                       4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET         (DCBX_ISCSI_OOO_TC + 1)
+	u32	tc_bw_tbl[2];
+	u32	tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT                     0
+#define DCBX_ETS_TSA_CBS                        1
+#define DCBX_ETS_TSA_ETS                        2
+};
+
+struct dcbx_app_priority_entry {
+	u32 entry;
+#define DCBX_APP_PRI_MAP_MASK       0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT      0
+#define DCBX_APP_PRI_0              0x01
+#define DCBX_APP_PRI_1              0x02
+#define DCBX_APP_PRI_2              0x04
+#define DCBX_APP_PRI_3              0x08
+#define DCBX_APP_PRI_4              0x10
+#define DCBX_APP_PRI_5              0x20
+#define DCBX_APP_PRI_6              0x40
+#define DCBX_APP_PRI_7              0x80
+#define DCBX_APP_SF_MASK            0x00000300
+#define DCBX_APP_SF_SHIFT           8
+#define DCBX_APP_SF_ETHTYPE         0
+#define DCBX_APP_SF_PORT            1
+#define DCBX_APP_PROTOCOL_ID_MASK   0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT  16
+};
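+
+/* Editorial sketch (not part of the HSI): extracting the protocol ID and
+ * priority bitmap from an entry word. Note the "FW structure in BE"
+ * comments below; a real reader would byte-swap the word first.
+ */
+static inline void example_decode_app_entry(u32 entry, u16 *proto_id,
+					    u8 *pri_map)
+{
+	*proto_id = (entry & DCBX_APP_PROTOCOL_ID_MASK) >>
+		    DCBX_APP_PROTOCOL_ID_SHIFT;
+	*pri_map = (entry & DCBX_APP_PRI_MAP_MASK) >> DCBX_APP_PRI_MAP_SHIFT;
+}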
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+	u32 flags;
+#define DCBX_APP_ENABLED_MASK           0x00000001
+#define DCBX_APP_ENABLED_SHIFT          0
+#define DCBX_APP_WILLING_MASK           0x00000002
+#define DCBX_APP_WILLING_SHIFT          1
+#define DCBX_APP_ERROR_MASK             0x00000004
+#define DCBX_APP_ERROR_SHIFT            2
+/* Not in use
+ * #define DCBX_APP_DEFAULT_PRI_MASK       0x00000f00
+ * #define DCBX_APP_DEFAULT_PRI_SHIFT      8
+ */
+#define DCBX_APP_MAX_TCS_MASK           0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT          12
+#define DCBX_APP_NUM_ENTRIES_MASK       0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT      16
+	struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+	/* PG feature */
+	struct dcbx_ets_feature ets;
+
+	/* PFC feature */
+	u32			pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK             0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT            0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0            0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1            0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2            0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3            0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4            0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5            0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6            0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7            0x80
+
+#define DCBX_PFC_FLAGS_MASK                     0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT                    8
+#define DCBX_PFC_CAPS_MASK                      0x00000f00
+#define DCBX_PFC_CAPS_SHIFT                     8
+#define DCBX_PFC_MBC_MASK                       0x00004000
+#define DCBX_PFC_MBC_SHIFT                      14
+#define DCBX_PFC_WILLING_MASK                   0x00008000
+#define DCBX_PFC_WILLING_SHIFT                  15
+#define DCBX_PFC_ENABLED_MASK                   0x00010000
+#define DCBX_PFC_ENABLED_SHIFT                  16
+#define DCBX_PFC_ERROR_MASK                     0x00020000
+#define DCBX_PFC_ERROR_SHIFT                    17
+
+	/* APP feature */
+	struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+	u32 config;
+#define DCBX_CONFIG_VERSION_MASK            0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT           0
+#define DCBX_CONFIG_VERSION_DISABLED        0
+#define DCBX_CONFIG_VERSION_IEEE            1
+#define DCBX_CONFIG_VERSION_CEE             2
+
+	u32			flags;
+	struct dcbx_features	features;
+};
+
+struct dcbx_mib {
+	u32	prefix_seq_num;
+	u32	flags;
+	struct dcbx_features	features;
+	u32			suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+	u16	valid;
+	u16	length;
+	u32	data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      G L O B A L   */
+/*                                    */
+/**************************************/
+struct public_global {
+	u32				max_path;
+#define MAX_PATH_BIG_BEAR       2
+#define MAX_PATH_K2             1
+	u32				max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+	u32				debug_mb_offset;
+	u32				phymod_dbg_mb_offset;
+	struct couple_mode_teaming	cmt;
+	s32				internal_temperature;
+	u32				mfw_ver;
+	u32				running_bundle_id;
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P A T H       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+* Shared Memory 2 Region                                                   *
+****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:   PF ack                                                          */
+/* 128 bit: VF ack                                                          */
+/* 8 bit:   iov_dis_ack                                                     */
+/* In order to maintain endianness in the mailbox HSI, we want to keep      */
+/* using u32. The fw must have the VF right after the PF since this is how  */
+/* it accesses arrays (it always expects the VF to reside after the PF,     */
+/* which makes the calculation much easier for it).                         */
+/* In order to satisfy both constraints, and to keep the struct small, the  */
+/* code will abuse the structure defined here to achieve the actual         */
+/* partition above.                                                         */
+/****************************************************************************/
+struct fw_flr_mb {
+	u32	aggint;
+	u32	opgen_addr;
+	u32	accum_ack;  /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE       0
+#define ACCUM_ACK_PF_SHIFT      0
+
+#define ACCUM_ACK_VF_BASE       8
+#define ACCUM_ACK_VF_SHIFT      3
+
+#define ACCUM_ACK_IOV_DIS_BASE  256
+#define ACCUM_ACK_IOV_DIS_SHIFT 8
+};
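+
+/* Editorial sketch (not part of the HSI): accum_ack is conceptually a
+ * packed bit array covering the PF/VF/IOV_DIS regions described above
+ * (the struct deliberately declares only the first u32); an absolute bit
+ * index maps to a u32 word and a bit within it in the usual way.
+ */
+static inline void example_accum_ack_pos(u32 bit_idx, u32 *word, u32 *bit)
+{
+	*word = bit_idx / 32;
+	*bit = bit_idx % 32;
+}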
+
+struct public_path {
+	struct fw_flr_mb	flr_mb;
+	u32			mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+	u32			process_kill;
+#define PROCESS_KILL_COUNTER_MASK               0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT              0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK          0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT         16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P O R T       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+* Driver <-> FW Mailbox                                                    *
+****************************************************************************/
+
+struct public_port {
+	u32 validity_map;   /* 0x0 (4*2 = 0x8) */
+
+	/* validity bits */
+#define MCP_VALIDITY_PCI_CFG                    0x00100000
+#define MCP_VALIDITY_MB                         0x00200000
+#define MCP_VALIDITY_DEV_INFO                   0x00400000
+#define MCP_VALIDITY_RESERVED                   0x00000007
+
+	/* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+
+	/* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI            0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+	u32 link_status;
+#define LINK_STATUS_LINK_UP                                     0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK                       0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD                    BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD                    (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G                        (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G                        (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G                        (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G                        (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G                       (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G                        (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED                      0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE                     0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED                     0x00000080
+
+#define LINK_STATUS_PFC_ENABLED                         0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE        0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE        0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE            0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE            0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE            0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE            0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE           0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE            0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK      0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE      (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE        BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE       (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE             (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT                                0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED                     0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED                     0x00400000
+
+	u32			link_status1;
+	u32			ext_phy_fw_version;
+	u32			drv_phy_cfg_addr;
+
+	u32			port_stx;
+
+	u32			stat_nig_timer;
+
+	struct port_mf_cfg	port_mf_config;
+	struct port_stats	stats;
+
+	u32			media_type;
+#define MEDIA_UNSPECIFIED       0x0
+#define MEDIA_SFPP_10G_FIBER    0x1
+#define MEDIA_XFP_FIBER         0x2
+#define MEDIA_DA_TWINAX         0x3
+#define MEDIA_BASE_T            0x4
+#define MEDIA_SFP_1G_FIBER      0x5
+#define MEDIA_KR                0xf0
+#define MEDIA_NOT_PRESENT       0xff
+
+	u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET             0
+#define LFA_LINK_FLAP_REASON_MASK               0x000000ff
+#define LFA_NO_REASON                           (0 << 0)
+#define LFA_LINK_DOWN                           BIT(0)
+#define LFA_FORCE_INIT                          BIT(1)
+#define LFA_LOOPBACK_MISMATCH                   BIT(2)
+#define LFA_SPEED_MISMATCH                      BIT(3)
+#define LFA_FLOW_CTRL_MISMATCH                  BIT(4)
+#define LFA_ADV_SPEED_MISMATCH                  BIT(5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET        8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK          0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET                  16
+#define LINK_FLAP_COUNT_MASK                    0x00ff0000
+
+	u32					link_change_count;
+
+	/* LLDP params */
+	struct lldp_config_params_s	lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+	struct lldp_status_params_s	lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+	struct lldp_system_tlvs_buffer_s	system_lldp_tlvs_buf;
+
+	/* DCBX related MIB */
+	struct dcbx_local_params		local_admin_dcbx_mib;
+	struct dcbx_mib				remote_dcbx_mib;
+	struct dcbx_mib				operational_dcbx_mib;
+};
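+
+/* Editorial sketch (not part of the HSI): testing the speed/duplex field
+ * of link_status; the encoded speeds are the "(n << 1)" constants above.
+ */
+static inline bool example_link_is_100g(u32 link_status)
+{
+	return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
+	       LINK_STATUS_SPEED_AND_DUPLEX_100G;
+}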
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      F U N C       */
+/*                                    */
+/**************************************/
+
+struct public_func {
+	u32	iscsi_boot_signature;
+	u32	iscsi_boot_block_offset;
+
+	u32	reserved[8];
+
+	u32	config;
+
+	/* E/R/I/D */
+	/* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING          0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT    0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK               0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT              4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE               0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX                0x00000030
+
+	/* MINBW, MAXBW */
+	/* value range - 0..100, increments in 1 %  */
+#define FUNC_MF_CFG_MIN_BW_MASK                 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT                8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK                 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT                16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x00640000
+
+	u32	status;
+#define FUNC_STATUS_VLINK_DOWN                  0x00000001
+
+	u32	mac_upper;  /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT            FUNC_MF_CFG_UPPERMAC_MASK
+	u32	mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+	u32	fcoe_wwn_port_name_upper;
+	u32	fcoe_wwn_port_name_lower;
+
+	u32	fcoe_wwn_node_name_upper;
+	u32	fcoe_wwn_node_name_lower;
+
+	u32	ovlan_stag; /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK              0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT             0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT           FUNC_MF_CFG_OV_STAG_MASK
+
+	u32	pf_allocation;  /* vf per pf */
+
+	u32	preserve_data;  /* Will be used by CCM */
+
+	u32	driver_last_activity_ts;
+
+	u32	drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+
+	u32	drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK        0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT       0
+
+#define DRV_ID_MCP_HSI_VER_MASK         0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT        16
+#define DRV_ID_MCP_HSI_VER_CURRENT      BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK            0xff000000
+#define DRV_ID_DRV_TYPE_SHIFT           24
+#define DRV_ID_DRV_TYPE_UNKNOWN         (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX		BIT(DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS         (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG            (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT         (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS         (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE          (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD         (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX             (8 << DRV_ID_DRV_TYPE_SHIFT)
+};
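+
+/* Example (sketch): the config bit-fields above follow a MASK/SHIFT
+ * convention, so the personality of a function can be read as below.
+ * qed_func_protocol() is a hypothetical helper name, not part of this
+ * interface.
+ */
+static inline u32 qed_func_protocol(const struct public_func *p_func)
+{
+	return (p_func->config & FUNC_MF_CFG_PROTOCOL_MASK) >>
+	       FUNC_MF_CFG_PROTOCOL_SHIFT;
+}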
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       M B          */
+/*                                    */
+/**************************************/
+/* This is the only section that the driver can write to.
+ * Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+struct mcp_mac {
+	u32	mac_upper;  /* Upper 16 bits are always zeroes */
+	u32	mac_lower;
+};
+
+struct mcp_val64 {
+	u32	lo;
+	u32	hi;
+};
+
+struct mcp_file_att {
+	u32	nvm_start_addr;
+	u32	len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+	u32	version;
+	u8	name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+union drv_union_data {
+	u32			ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+	struct mcp_mac		wol_mac;
+
+	struct pmm_phy_cfg	drv_phy_cfg;
+
+	struct mcp_val64	val64; /* For PHY / AVS commands */
+
+	u8			raw_data[MCP_DRV_NVM_BUF_LEN];
+
+	struct mcp_file_att	file_att;
+
+	u32			ack_vf_disabled[VF_MAX_STATIC / 32];
+
+	struct drv_version_stc	drv_version;
+};
+
+struct public_drv_mb {
+	u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                       0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+#define DRV_MSG_CODE_UNLOAD_REQ                 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+#define DRV_MSG_CODE_INIT_PHY                   0x22000000
+	/* Params - FORCE - Reinitialize the link regardless of LFA */
+	/*        - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET                 0x23000000
+
+#define DRV_MSG_CODE_SET_LLDP                   0x24000000
+#define DRV_MSG_CODE_SET_DCBX                   0x25000000
+
+#define DRV_MSG_CODE_NIG_DRAIN                  0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX                0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN         0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA          0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT           0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM             0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM            0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE               0x00080000
+#define DRV_MSG_CODE_MCP_RESET                  0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE            0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ               0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE              0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ              0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE             0x000e0000
+#define DRV_MSG_CODE_SET_VERSION                0x000f0000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+	u32 drv_mb_param;
+
+	/* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN         0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP             0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED        0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED         0x00000003
+
+	/* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER        0x00000001
+
+	/* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE             0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE         0x00000002
+
+	/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK             0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT            0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK            0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT           1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK           0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT          3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK   0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT  0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW     0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE   0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT           0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK            0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT              24
+#define DRV_MB_PARAM_NVM_LEN_MASK               0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT             0
+#define DRV_MB_PARAM_PHY_ADDR_MASK              0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT             16
+#define DRV_MB_PARAM_PHY_LANE_MASK              0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT      29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK       0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT             30
+#define DRV_MB_PARAM_PHY_PORT_MASK              0xc0000000
+
+/* Configure VF MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT    0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK     0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT   8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK    0x0000FF00
+
+	u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                        0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE             0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA        0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG       0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE           0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE               0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS   0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE             0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE               0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT  0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE               0x25000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE              0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE        0xb0010000
+#define FW_MSG_CODE_FLR_ACK                     0x02000000
+#define FW_MSG_CODE_FLR_NACK                    0x02100000
+
+#define FW_MSG_CODE_NVM_OK                      0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE            0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED       0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND       0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND          0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC     0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR     0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE     0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND          0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED        0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED        0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET              0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE           0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY          0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE            0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK      0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT            0x00600000
+#define FW_MSG_CODE_PHY_OK                      0x00110000
+#define FW_MSG_CODE_PHY_ERROR                   0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR       0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK          0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR         0x00150000
+
+#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+	u32	fw_mb_param;
+
+	u32	drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                      0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+	u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                      0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+#define MCP_EVENT_MASK                          0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+	union drv_union_data union_data;
+};
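+
+/* Example (sketch): a request is issued by writing drv_mb_header with a
+ * DRV_MSG_CODE_* value in the upper 16 bits and a rolling sequence
+ * number in the lower 16 bits. build_drv_mb_header() is a hypothetical
+ * helper name, not part of this interface.
+ */
+static inline u32 build_drv_mb_header(u32 cmd, u16 seq)
+{
+	return (cmd & DRV_MSG_CODE_MASK) | (seq & DRV_MSG_SEQ_NUMBER_MASK);
+}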
+
+/* MFW - DRV MB */
+/**********************************************************************
+* Description
+*   Incremental aggregative scheme:
+*   8-bit MFW counter per message
+*   8-bit ack-counter per message
+* Capabilities
+*   Provides up to 256 aggregative messages per type
+*   Packs 4 message types per dword
+*   Each message type maps to a byte offset within the dword
+*   Backward compatibility by using sizeof for the counters
+*   No lock required for 32-bit messages
+* Limitations:
+* For messages wider than 32 bits, a dedicated mechanism (e.g. a lock)
+* is required to prevent data corruption.
+**********************************************************************/
+enum MFW_DRV_MSG_TYPE {
+	MFW_DRV_MSG_LINK_CHANGE,
+	MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+	MFW_DRV_MSG_VF_DISABLED,
+	MFW_DRV_MSG_LLDP_DATA_UPDATED,
+	MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+	MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+	MFW_DRV_MSG_ERROR_RECOVERY,
+	MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs)    ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id)       ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id)      (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id)        (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+	u32	sup_msgs;
+	u32	msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+	u32	ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
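+
+/* Example (sketch): per the counter scheme described above, a message
+ * can be treated as pending while its 8-bit counter in msg[] differs
+ * from the matching counter in ack[]; the "pending means counters
+ * differ" reading is an assumption drawn from that description, and
+ * mfw_mb_msg_pending() is a hypothetical helper name.
+ */
+static inline bool mfw_mb_msg_pending(const struct public_mfw_mb *mb,
+				      enum MFW_DRV_MSG_TYPE msg_id)
+{
+	u32 mask = MFW_DRV_MSG_MASK(msg_id);
+
+	return (mb->msg[MFW_DRV_MSG_DWORD(msg_id)] & mask) !=
+	       (mb->ack[MFW_DRV_MSG_DWORD(msg_id)] & mask);
+}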
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       D A T A      */
+/*                                    */
+/**************************************/
+enum public_sections {
+	PUBLIC_DRV_MB,          /* Points to the first drv_mb of path0 */
+	PUBLIC_MFW_MB,          /* Points to the first mfw_mb of path0 */
+	PUBLIC_GLOBAL,
+	PUBLIC_PATH,
+	PUBLIC_PORT,
+	PUBLIC_FUNC,
+	PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+	u32	ver;
+	u8	name[32];
+};
+
+struct mcp_public_data {
+	/* The sections field is an array */
+	u32			num_sections;
+	offsize_t		sections[PUBLIC_MAX_SECTIONS];
+	struct public_drv_mb	drv_mb[MCP_GLOB_FUNC_MAX];
+	struct public_mfw_mb	mfw_mb[MCP_GLOB_FUNC_MAX];
+	struct public_global	global;
+	struct public_path	path[MCP_GLOB_PATH_MAX];
+	struct public_port	port[MCP_GLOB_PORT_MAX];
+	struct public_func	func[MCP_GLOB_FUNC_MAX];
+	struct drv_ver_info_stc drv_info;
+};
+
+struct nvm_cfg_mac_address {
+	u32	mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK                             0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET                           0
+
+	u32	mac_addr_lo;
+};
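+
+/* Example (sketch): only the low 16 bits of mac_addr_hi are defined
+ * (NVM_CFG_MAC_ADDRESS_HI_MASK), so a 48-bit MAC spans hi[15:0] plus
+ * all of mac_addr_lo. The big-endian byte layout below is an
+ * illustrative assumption, and nvm_cfg_mac_to_bytes() is a
+ * hypothetical helper name.
+ */
+static inline void nvm_cfg_mac_to_bytes(const struct nvm_cfg_mac_address *mac,
+					u8 *buf)
+{
+	u32 hi = mac->mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK;
+
+	buf[0] = hi >> 8;
+	buf[1] = hi;
+	buf[2] = mac->mac_addr_lo >> 24;
+	buf[3] = mac->mac_addr_lo >> 16;
+	buf[4] = mac->mac_addr_lo >> 8;
+	buf[5] = mac->mac_addr_lo;
+}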
+
+/******************************************
+* nvm_cfg1 structs
+******************************************/
+
+struct nvm_cfg1_glob {
+	u32 generic_cont0;					/* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET                         0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE                           0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH                           0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT                           0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK                              0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET                            4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED                        0x0
+#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF                         0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4                             0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5                           0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0                           0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD                                0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP                               0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK              0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET            12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED          0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED           0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK                       0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET                     13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK                      0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET                    21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK                         0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET                       29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED                     0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED                      0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK                           0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET                         30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED                       0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED                        0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK                       0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET                     31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED                   0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED                    0x1
+
+	u32	engineering_change[3];				/* 0x4 */
+
+	u32	manufacturing_id;				/* 0x10 */
+
+	u32	serial_number[4];				/* 0x14 */
+
+	u32	pcie_cfg;					/* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK                              0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET                            0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1                          0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2                          0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3                          0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK                   0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET                 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED               0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED                0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK                         0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET                       3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED               0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED                 0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED                  0x2
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED              0x3
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK               0x00000020
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET             5
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED           0x0
+#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED            0x1
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK                 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET               6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK                     0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET                   10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW                       0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB                      0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB                    0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB                    0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK                     0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET                   13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK                     0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET                   21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK                      0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET                    29
+
+	u32 mgmt_traffic;                                       /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK                           0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET                         0
+#define NVM_CFG1_GLOB_RESERVED60_100KHZ                         0x0
+#define NVM_CFG1_GLOB_RESERVED60_400KHZ                         0x1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK                     0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET                   1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK                     0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET                   9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK                        0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET                      17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK                        0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET                      25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED                    0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII                        0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII                       0x2
+
+	u32 core_cfg;                                           /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK                    0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET                  0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G                0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G                0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G               0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F              0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E              0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G                0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G                0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G                0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G                0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK             0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET           8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED         0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED          0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK            0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET          9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED        0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED         0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK                      0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET                    10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK                     0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET                   18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK                             0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET                           26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP                       0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP                        0x1
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED                         0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK                 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET               29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED             0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED              0x1
+
+	u32 e_lane_cfg1;					/* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+
+	u32 e_lane_cfg2;					/* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET                         8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED                       0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ                         0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ                         0x2
+#define NVM_CFG1_GLOB_NCSI_MASK                                 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET                               12
+#define NVM_CFG1_GLOB_NCSI_DISABLED                             0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED                              0x1
+
+	u32 f_lane_cfg1;					/* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+
+	u32 f_lane_cfg2;					/* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+
+	u32 eagle_preemphasis;					/* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+
+	u32 eagle_driver_current;				/* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+
+	u32 falcon_preemphasis;					/* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+
+	u32 falcon_driver_current;				/* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+
+	u32	pci_id;						/* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK                            0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET                          0
+
+	u32	pci_subsys_id;					/* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET                0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK                  0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET                16
+
+	u32	bar;						/* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK                   0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET                 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED               0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K                     0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K                     0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K                     0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K                    0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K                    0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K                    0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K                   0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K                   0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K                   0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M                     0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M                     0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M                     0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M                     0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M                    0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M                    0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK                     0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET                   4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED                 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K                       0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K                       0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K                      0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K                      0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K                      0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K                     0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K                     0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K                     0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M                       0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M                       0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M                       0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M                       0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M                      0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M                      0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M                      0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK                            0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET                          8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED                        0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K                             0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K                            0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K                            0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K                            0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M                              0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M                              0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M                              0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M                              0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M                             0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M                             0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M                             0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M                            0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M                            0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M                            0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G                              0xF
+
+	u32 eagle_txfir_main;					/* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+
+	u32 eagle_txfir_post;					/* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+
+	u32 falcon_txfir_main;					/* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+
+	u32 falcon_txfir_post;					/* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+
+	u32 manufacture_ver;					/* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK                           0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET                         0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK                           0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET                         6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK                           0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET                         12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK                           0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET                         18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK                           0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET                         24
+
+	u32 manufacture_time;					/* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK                          0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET                        0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK                          0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET                        6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK                          0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET                        12
+
+	u32 led_global_settings;				/* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET                         0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK                           0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET                         4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET                         8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK                           0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET                         12
+
+	u32	generic_cont1;					/* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK                         0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET                       0
+
+	u32	mbi_version;					/* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET                      0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET                      8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET                      16
+
+	u32	mbi_date;					/* 0x80 */
+
+	u32	misc_sig;					/* 0x84 */
+
+	/*  Define the GPIO mapping to switch the i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK                   0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET                 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK                   0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET                 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA                      0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0                   0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1                   0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2                   0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3                   0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4                   0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5                   0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6                   0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7                   0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8                   0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9                   0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10                  0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11                  0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12                  0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13                  0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14                  0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15                  0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16                  0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17                  0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18                  0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19                  0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20                  0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21                  0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22                  0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23                  0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24                  0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25                  0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26                  0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27                  0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28                  0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29                  0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30                  0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31                  0x20
+
+	u32 reserved[46];					/* 0x88 */
+};
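+
+/* Example (sketch): all nvm_cfg1 bit-fields above share the same
+ * MASK/OFFSET naming convention, so a generic accessor can be written
+ * once. NVM_CFG1_GET() is a hypothetical macro, used e.g. as
+ * NVM_CFG1_GET(glob->core_cfg, NVM_CFG1_GLOB_NETWORK_PORT_MODE).
+ */
+#define NVM_CFG1_GET(val, field) \
+	(((val) & field ## _MASK) >> field ## _OFFSET)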
+
+struct nvm_cfg1_path {
+	u32 reserved[30];					/* 0x0 */
+};
+
+struct nvm_cfg1_port {
+	u32 power_dissipated;					/* 0x0 */
+#define NVM_CFG1_PORT_POWER_DIS_D0_MASK                         0x000000FF
+#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET                       0
+#define NVM_CFG1_PORT_POWER_DIS_D1_MASK                         0x0000FF00
+#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET                       8
+#define NVM_CFG1_PORT_POWER_DIS_D2_MASK                         0x00FF0000
+#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET                       16
+#define NVM_CFG1_PORT_POWER_DIS_D3_MASK                         0xFF000000
+#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET                       24
+
+	u32 power_consumed;					/* 0x4 */
+#define NVM_CFG1_PORT_POWER_CONS_D0_MASK                        0x000000FF
+#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET                      0
+#define NVM_CFG1_PORT_POWER_CONS_D1_MASK                        0x0000FF00
+#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET                      8
+#define NVM_CFG1_PORT_POWER_CONS_D2_MASK                        0x00FF0000
+#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET                      16
+#define NVM_CFG1_PORT_POWER_CONS_D3_MASK                        0xFF000000
+#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET                      24
+
+	u32 generic_cont0;					/* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK                             0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET                           0
+#define NVM_CFG1_PORT_LED_MODE_MAC1                             0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1                             0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2                             0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3                             0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2                             0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4                             0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5                             0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6                             0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3                             0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7                             0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8                             0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9                             0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4                             0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10                            0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11                            0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12                            0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK                        0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET                      8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK                            0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET                          16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED                        0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE                            0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE                             0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC                         0x3
+
+	u32	pcie_cfg;					/* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK                           0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET                         0
+
+	u32	features;					/* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK           0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET         0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED       0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED        0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK                     0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET                   1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED                 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED                  0x1
+
+	u32 speed_cap_mask;					/* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK            0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET          0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G            0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK            0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET          16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G            0x40
+
+	u32 link_settings;					/* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                       0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                     0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK                     0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET                   4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK                       0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET                     7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK                     0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET                   11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK      0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET    14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED  0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED   0x1
+
+	u32 phy_cfg;						/* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK                  0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET                0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG                 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER             0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER                 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN       0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN        0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK                 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET               16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS               0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR                   0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2                  0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4                  0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI                  0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI                  0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X                0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII                0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI                0xD
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI                 0xE
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI                0xF
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI                 0x10
+#define NVM_CFG1_PORT_AN_MODE_MASK                              0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET                            24
+#define NVM_CFG1_PORT_AN_MODE_NONE                              0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73                              0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37                              0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM                          0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM                          0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM                              0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII                             0x6
+
+	u32 mgmt_traffic;					/* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK                           0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET                         0
+#define NVM_CFG1_PORT_RESERVED61_DISABLED                       0x0
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII                 0x1
+#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS                0x2
+
+	u32 ext_phy;						/* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK                    0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET                  0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE                    0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844                0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK                 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET               8
+
+	u32 mba_cfg1;						/* 0x28 */
+#define NVM_CFG1_PORT_MBA_MASK                                  0x00000001
+#define NVM_CFG1_PORT_MBA_OFFSET                                0
+#define NVM_CFG1_PORT_MBA_DISABLED                              0x0
+#define NVM_CFG1_PORT_MBA_ENABLED                               0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK                        0x00000006
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET                      1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO                        0x0
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS                         0x1
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H                      0x2
+#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H                      0x3
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK                       0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET                     3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK                    0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET                  7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S                  0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B                  0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK                0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET              8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED            0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED             0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK                            0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET                          9
+#define NVM_CFG1_PORT_RESERVED5_DISABLED                        0x0
+#define NVM_CFG1_PORT_RESERVED5_2K                              0x1
+#define NVM_CFG1_PORT_RESERVED5_4K                              0x2
+#define NVM_CFG1_PORT_RESERVED5_8K                              0x3
+#define NVM_CFG1_PORT_RESERVED5_16K                             0x4
+#define NVM_CFG1_PORT_RESERVED5_32K                             0x5
+#define NVM_CFG1_PORT_RESERVED5_64K                             0x6
+#define NVM_CFG1_PORT_RESERVED5_128K                            0x7
+#define NVM_CFG1_PORT_RESERVED5_256K                            0x8
+#define NVM_CFG1_PORT_RESERVED5_512K                            0x9
+#define NVM_CFG1_PORT_RESERVED5_1M                              0xA
+#define NVM_CFG1_PORT_RESERVED5_2M                              0xB
+#define NVM_CFG1_PORT_RESERVED5_4M                              0xC
+#define NVM_CFG1_PORT_RESERVED5_8M                              0xD
+#define NVM_CFG1_PORT_RESERVED5_16M                             0xE
+#define NVM_CFG1_PORT_RESERVED5_32M                             0xF
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK                       0x001E0000
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET                     17
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK                 0x00E00000
+#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET               21
+
+	u32	mba_cfg2;					/* 0x2C */
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK                       0x0000FFFF
+#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET                     0
+#define NVM_CFG1_PORT_MBA_VLAN_MASK                             0x00010000
+#define NVM_CFG1_PORT_MBA_VLAN_OFFSET                           16
+
+	u32	vf_cfg;						/* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK                            0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET                          0
+#define NVM_CFG1_PORT_RESERVED6_MASK                            0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET                          16
+#define NVM_CFG1_PORT_RESERVED6_DISABLED                        0x0
+#define NVM_CFG1_PORT_RESERVED6_4K                              0x1
+#define NVM_CFG1_PORT_RESERVED6_8K                              0x2
+#define NVM_CFG1_PORT_RESERVED6_16K                             0x3
+#define NVM_CFG1_PORT_RESERVED6_32K                             0x4
+#define NVM_CFG1_PORT_RESERVED6_64K                             0x5
+#define NVM_CFG1_PORT_RESERVED6_128K                            0x6
+#define NVM_CFG1_PORT_RESERVED6_256K                            0x7
+#define NVM_CFG1_PORT_RESERVED6_512K                            0x8
+#define NVM_CFG1_PORT_RESERVED6_1M                              0x9
+#define NVM_CFG1_PORT_RESERVED6_2M                              0xA
+#define NVM_CFG1_PORT_RESERVED6_4M                              0xB
+#define NVM_CFG1_PORT_RESERVED6_8M                              0xC
+#define NVM_CFG1_PORT_RESERVED6_16M                             0xD
+#define NVM_CFG1_PORT_RESERVED6_32M                             0xE
+#define NVM_CFG1_PORT_RESERVED6_64M                             0xF
+
+	struct nvm_cfg_mac_address	lldp_mac_address;	/* 0x34 */
+
+	u32				led_port_settings;	/* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK                   0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET                 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK                   0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET                 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK                   0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET                 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G                      0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G                     0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G                     0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G                     0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G                     0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G                    0x40
+
+	u32 transceiver_00;					/* 0x40 */
+
+	/*  Define the GPIO mapping for the transceiver module-absent signal */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK                     0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET                   0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA                       0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0                    0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1                    0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2                    0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3                    0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4                    0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5                    0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6                    0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7                    0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8                    0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9                    0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10                   0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11                   0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12                   0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13                   0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14                   0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15                   0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16                   0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17                   0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18                   0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19                   0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20                   0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21                   0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22                   0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23                   0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24                   0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25                   0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26                   0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27                   0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28                   0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29                   0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30                   0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31                   0x20
+	/*  Define the GPIO mux settings to switch the i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK                  0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET                8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK                  0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET                12
+
+	u32 reserved[133];					/* 0x44 */
+};
+
+struct nvm_cfg1_func {
+	struct nvm_cfg_mac_address	mac_address;		/* 0x0 */
+
+	u32				rsrv1;			/* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED2_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET                          16
+
+	u32				rsrv2;			/* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED4_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET                          16
+
+	u32				device_id;		/* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET                0
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK                     0xFFFF0000
+#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET                   16
+
+	u32				cmn_cfg;		/* 0x14 */
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK                    0x00000007
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET                  0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE                     0x0
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL                     0x1
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP                   0x2
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT              0x3
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT               0x4
+#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE                    0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK                     0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET                   3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK                          0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET                        19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET                      0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI                         0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE                          0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE                          0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK                     0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET                   23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK                   0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET                 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED               0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED                0x1
+
+	u32 pci_cfg;						/* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK                 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET               0
+#define NVM_CFG1_FUNC_RESERVESD12_MASK                          0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET                        7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK                            0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET                          14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED                        0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K                             0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K                            0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K                            0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K                            0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M                              0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M                              0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M                              0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M                              0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M                             0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M                             0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M                             0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M                            0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M                            0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M                            0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G                              0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK                        0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET                      18
+
+	struct nvm_cfg_mac_address	fcoe_node_wwn_mac_addr;	/* 0x1C */
+
+	struct nvm_cfg_mac_address	fcoe_port_wwn_mac_addr;	/* 0x24 */
+
+	u32				reserved[9];		/* 0x2C */
+};
+
+struct nvm_cfg1 {
+	struct nvm_cfg1_glob	glob;				/* 0x0 */
+
+	struct nvm_cfg1_path	path[MCP_GLOB_PATH_MAX];	/* 0x140 */
+
+	struct nvm_cfg1_port	port[MCP_GLOB_PORT_MAX];	/* 0x230 */
+
+	struct nvm_cfg1_func	func[MCP_GLOB_FUNC_MAX];	/* 0xB90 */
+};
+
+/******************************************
+* nvm_cfg structs
+******************************************/
+
+enum nvm_cfg_sections {
+	NVM_CFG_SECTION_NVM_CFG1,
+	NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+	u32		num_sections;
+	u32		sections_offset[NVM_CFG_SECTION_MAX];
+	struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0          0
+#define PORT_1          1
+#define PORT_2          2
+#define PORT_3          3
+
+extern struct spad_layout g_spad;
+
+#define MCP_SPAD_SIZE                       0x00028000  /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size)				\
+	(u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) |	\
+	      (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
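+/* Worked example for TO_OFFSIZE above (illustrative only; the actual
+ * shift values live elsewhere): assuming OFFSIZE_OFFSET_SHIFT == 0 and
+ * OFFSIZE_SIZE_SHIFT == 16, the macro packs a dword-granular offset
+ * and size into one u32:
+ *
+ *   TO_OFFSIZE(0x100, 0x40) = ((0x100 >> 2) << 0) | ((0x40 >> 2) << 16)
+ *                           = 0x40 | (0x10 << 16) = 0x00100040
+ */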
+
+enum spad_sections {
+	SPAD_SECTION_TRACE,
+	SPAD_SECTION_NVM_CFG,
+	SPAD_SECTION_PUBLIC,
+	SPAD_SECTION_PRIVATE,
+	SPAD_SECTION_MAX
+};
+
+struct spad_layout {
+	struct nvm_cfg		nvm_cfg;
+	struct mcp_public_data	public_data;
+};
+
+#define CRC_MAGIC_VALUE                     0xDEBB20E3
+#define CRC32_POLYNOMIAL                    0xEDB88320
+#define NVM_CRC_SIZE                            (sizeof(u32))
+
+enum nvm_sw_arbitrator {
+	NVM_SW_ARB_HOST,
+	NVM_SW_ARB_MCP,
+	NVM_SW_ARB_UART,
+	NVM_SW_ARB_RESERVED
+};
+
+/****************************************************************************
+* Boot Strap Region                                                        *
+****************************************************************************/
+struct legacy_bootstrap_region {
+	u32	magic_value;
+#define NVM_MAGIC_VALUE          0x669955aa
+	u32	sram_start_addr;
+	u32	code_len;               /* boot code length (in dwords) */
+	u32	code_start_addr;
+	u32	crc;                    /* 32-bit CRC */
+};
+
+/****************************************************************************
+* Directories Region                                                       *
+****************************************************************************/
+struct nvm_code_entry {
+	u32	image_type;             /* Image type */
+	u32	nvm_start_addr;         /* NVM address of the image */
+	u32	len;                    /* Include CRC */
+	u32	sram_start_addr;
+	u32	sram_run_addr;          /* Relevant in case of MIM only */
+};
+
+enum nvm_image_type {
+	NVM_TYPE_TIM1		= 0x01,
+	NVM_TYPE_TIM2		= 0x02,
+	NVM_TYPE_MIM1		= 0x03,
+	NVM_TYPE_MIM2		= 0x04,
+	NVM_TYPE_MBA		= 0x05,
+	NVM_TYPE_MODULES_PN	= 0x06,
+	NVM_TYPE_VPD		= 0x07,
+	NVM_TYPE_MFW_TRACE1	= 0x08,
+	NVM_TYPE_MFW_TRACE2	= 0x09,
+	NVM_TYPE_NVM_CFG1	= 0x0a,
+	NVM_TYPE_L2B		= 0x0b,
+	NVM_TYPE_DIR1		= 0x0c,
+	NVM_TYPE_EAGLE_FW1	= 0x0d,
+	NVM_TYPE_FALCON_FW1	= 0x0e,
+	NVM_TYPE_PCIE_FW1	= 0x0f,
+	NVM_TYPE_HW_SET		= 0x10,
+	NVM_TYPE_LIM		= 0x11,
+	NVM_TYPE_AVS_FW1	= 0x12,
+	NVM_TYPE_DIR2		= 0x13,
+	NVM_TYPE_CCM		= 0x14,
+	NVM_TYPE_EAGLE_FW2	= 0x15,
+	NVM_TYPE_FALCON_FW2	= 0x16,
+	NVM_TYPE_PCIE_FW2	= 0x17,
+	NVM_TYPE_AVS_FW2	= 0x18,
+
+	NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 200
+
+struct nvm_dir {
+	s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK   0x00000001
+#define NVM_DIR_SEQ_MASK        0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+
+#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
+
+	u32			num_images;
+	u32			rsrv;
+	struct nvm_code_entry	code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) +		 \
+				   (_num_images -			 \
+				    1) * sizeof(struct nvm_code_entry) + \
+				   NVM_CRC_SIZE)
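+/* Worked example for NVM_DIR_SIZE above (illustrative, assuming the
+ * structures pack without padding): sizeof(struct nvm_dir) is 32 bytes
+ * (three u32 header fields plus one 20-byte nvm_code_entry), so a
+ * directory holding three images needs
+ *
+ *   NVM_DIR_SIZE(3) = 32 + 2 * 20 + NVM_CRC_SIZE = 76 bytes
+ */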
+
+struct nvm_vpd_image {
+	u32	format_revision;
+#define VPD_IMAGE_VERSION        1
+
+	/* This array length depends on the number of VPD fields */
+	u8	vpd_data[1];
+};
+
+/****************************************************************************
+* NVRAM FULL MAP                                                           *
+****************************************************************************/
+#define DIR_ID_1    (0)
+#define DIR_ID_2    (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1    (0)
+#define MFW_BUNDLE_2    (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE    (FLASH_PAGE_SIZE)           /* 4KB */
+#define ASIC_MIM_MAX_SIZE   (300 * FLASH_PAGE_SIZE)     /* 1.2MB */
+#define FPGA_MIM_MAX_SIZE   (25 * FLASH_PAGE_SIZE)      /* 100KB */
+
+#define LIM_MAX_SIZE        ((2 *				      \
+			      FLASH_PAGE_SIZE) -		      \
+			     sizeof(struct legacy_bootstrap_region) - \
+			     NVM_RSV_SIZE)
+#define LIM_OFFSET          (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE            (44)
+#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
+			       FPGA_MIM_MAX_SIZE)
+#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
+				  ((idx ==			     \
+				    NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
+#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
+				      MIM_MAX_SIZE(is_asic) * 2)
+
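+/* Illustrative cross-check against the NVRAM map below: the directories
+ * start right after bootstrap + rsrv + LIM, i.e. at
+ * 0x14 + NVM_RSV_SIZE + LIM_MAX_SIZE = 2 * FLASH_PAGE_SIZE = 0x2000,
+ * and the two FLASH_PAGE_SIZE directories end at 0x4000. Hence
+ *
+ *   MIM_OFFSET(NVM_TYPE_MIM1, true) = 0x4000
+ *   MIM_OFFSET(NVM_TYPE_MIM2, true) = 0x4000 + ASIC_MIM_MAX_SIZE
+ *                                   = 0x4000 + 0x12C000 = 0x130000
+ *
+ * matching the Dir1/Dir2/MIM1/MIM2 addresses in the diagram.
+ */
+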
+union nvm_dir_union {
+	struct nvm_dir	dir;
+	u8		page[FLASH_PAGE_SIZE];
+};
+
+/*                        Address
+ *  +-------------------+ 0x000000
+ *  |    Bootstrap:     |
+ *  | magic_number      |
+ *  | sram_start_addr   |
+ *  | code_len          |
+ *  | code_start_addr   |
+ *  | crc               |
+ *  +-------------------+ 0x000014
+ *  | rsrv              |
+ *  +-------------------+ 0x000040
+ *  | LIM               |
+ *  +-------------------+ 0x002000
+ *  | Dir1              |
+ *  +-------------------+ 0x003000
+ *  | Dir2              |
+ *  +-------------------+ 0x004000
+ *  | MIM1              |
+ *  +-------------------+ 0x130000
+ *  | MIM2              |
+ *  +-------------------+ 0x25C000
+ *  | Rest Images:      |
+ *  | TIM1/2            |
+ *  | MFW_TRACE1/2      |
+ *  | Eagle/Falcon FW   |
+ *  | PCIE/AVS FW       |
+ *  | MBA/CCM/L2B       |
+ *  | VPD               |
+ *  | optic_modules     |
+ *  |  ...              |
+ *  +-------------------+ 0x400000
+ */
+struct nvm_image {
+/*********** !!!  FIXED SECTIONS  !!! DO NOT MODIFY !!! **********************/
+	/* NVM Offset  (size) */
+	struct legacy_bootstrap_region	bootstrap;
+	u8				rsrv[NVM_RSV_SIZE];
+	u8				lim_image[LIM_MAX_SIZE];
+	union nvm_dir_union		dir[MAX_MFW_BUNDLES];
+
+	/* MIM1_IMAGE                              0x004000 (0x12c000) */
+	/* MIM2_IMAGE                              0x130000 (0x12c000) */
+/*********** !!!  FIXED SECTIONS  !!! DO NOT MODIFY !!! **********************/
+};                              /* 0x134 */
+
+#define NVM_OFFSET(f)	((u32)(uintptr_t)(&(((struct nvm_image *)0)->f)))
+
+struct hw_set_info {
+	u32	reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+	u32	bank_num;
+	u32	pf_num;
+	u32	operation;
+#define READ_OP     1
+#define WRITE_OP    2
+#define RMW_SET_OP  3
+#define RMW_CLR_OP  4
+
+	u32	reg_addr;
+	u32	reg_data;
+
+	u32	reset_type;
+#define POR_RESET_TYPE	BIT(0)
+#define HARD_RESET_TYPE	BIT(1)
+#define CORE_RESET_TYPE	BIT(2)
+#define MCP_RESET_TYPE	BIT(3)
+#define PERSET_ASSERT	BIT(4)
+#define PERSET_DEASSERT	BIT(5)
+};
+
+struct hw_set_image {
+	u32			format_version;
+#define HW_SET_IMAGE_VERSION        1
+	u32			no_hw_sets;
+
+	/* This array length depends on the no_hw_sets */
+	struct hw_set_info	hw_sets[1];
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 0000000..ffa9927
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,776 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))
+
+struct qed_ptt {
+	struct list_head	list_entry;
+	unsigned int		idx;
+	struct pxp_ptt_entry	pxp;
+};
+
+struct qed_ptt_pool {
+	struct list_head	free_list;
+	spinlock_t		lock; /* ptt synchronized access */
+	struct qed_ptt		ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool),
+					      GFP_ATOMIC);
+	int i;
+
+	if (!p_pool)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&p_pool->free_list);
+	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+		p_pool->ptts[i].idx = i;
+		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+		p_pool->ptts[i].pxp.pretend.control = 0;
+		if (i >= RESERVED_PTT_MAX)
+			list_add(&p_pool->ptts[i].list_entry,
+				 &p_pool->free_list);
+	}
+
+	p_hwfn->p_ptt_pool = p_pool;
+	spin_lock_init(&p_pool->lock);
+
+	return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ptt *p_ptt;
+	int i;
+
+	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+	}
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+	kfree(p_hwfn->p_ptt_pool);
+	p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ptt *p_ptt;
+	unsigned int i;
+
+	/* Take the free PTT from the list */
+	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+						 struct qed_ptt, list_entry);
+			list_del(&p_ptt->list_entry);
+
+			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+				   "allocated ptt %d\n", p_ptt->idx);
+			return p_ptt;
+		}
+
+		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+		usleep_range(1000, 2000);
+	}
+
+	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+	return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt)
+{
+	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt)
+{
+	/* The HW is using DWORDS and we need to translate it to Bytes */
+	return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u32 new_hw_addr)
+{
+	u32 prev_hw_addr;
+
+	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+	if (new_hw_addr == prev_hw_addr)
+		return;
+
+	/* Update PTT entry in admin window */
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "Updating PTT entry %d to offset 0x%x\n",
+		   p_ptt->idx, new_hw_addr);
+
+	/* The HW is using DWORDS and the address is in Bytes */
+	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+	REG_WR(p_hwfn,
+	       qed_ptt_config_addr(p_ptt) +
+	       offsetof(struct pxp_ptt_entry, offset),
+	       le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       u32 hw_addr)
+{
+	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+	u32 offset;
+
+	offset = hw_addr - win_hw_addr;
+
+	/* Verify the address is within the window */
+	if (hw_addr < win_hw_addr ||
+	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+		offset = 0;
+	}
+
+	return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
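+
+/* Illustrative sketch of the translation above (not driver code): with
+ * the window currently aimed at win_hw_addr, a GRC address inside
+ * [win_hw_addr, win_hw_addr + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+ * is reached directly through the BAR; anything outside first re-aims
+ * the window at hw_addr and is then reached at BAR offset 0. E.g. if
+ * the window starts at 0x50000 and hw_addr == 0x50010, the returned
+ * BAR address is qed_ptt_get_bar_addr(p_ptt) + 0x10.
+ */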
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+				     enum reserved_ptts ptt_idx)
+{
+	if (ptt_idx >= RESERVED_PTT_MAX) {
+		DP_NOTICE(p_hwfn,
+			  "Requested PTT %d is out of range\n", ptt_idx);
+		return NULL;
+	}
+
+	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+	    struct qed_ptt *p_ptt,
+	    u32 hw_addr, u32 val)
+{
+	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+	REG_WR(p_hwfn, bar_addr, val);
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+		   bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+	   struct qed_ptt *p_ptt,
+	   u32 hw_addr)
+{
+	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+	u32 val = REG_RD(p_hwfn, bar_addr);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+		   bar_addr, hw_addr, val);
+
+	return val;
+}
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  void *addr,
+			  u32 hw_addr,
+			  size_t n,
+			  bool to_device)
+{
+	u32 dw_count, *host_addr, hw_offset;
+	size_t quota, done = 0;
+	u32 __iomem *reg_addr;
+
+	while (done < n) {
+		quota = min_t(size_t, n - done,
+			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+		hw_offset = qed_ptt_get_bar_addr(p_ptt);
+
+		dw_count = quota / 4;
+		host_addr = (u32 *)((u8 *)addr + done);
+		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+		if (to_device)
+			while (dw_count--)
+				DIRECT_REG_WR(reg_addr++, *host_addr++);
+		else
+			while (dw_count--)
+				*host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+		done += quota;
+	}
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     void *dest, u32 hw_addr, size_t n)
+{
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "hw_addr 0x%x, dest %p, size %lu\n",
+		   hw_addr, dest, (unsigned long)n);
+
+	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   u32 hw_addr, void *src, size_t n)
+{
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "hw_addr 0x%x, src %p, size %lu\n",
+		   hw_addr, src, (unsigned long)n);
+
+	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u16 fid)
+{
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	/* Every pretend undoes previous pretends, including
+	 * previous port pretend.
+	 */
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+	p_ptt->pxp.pretend.control = cpu_to_le16(control);
+	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+	REG_WR(p_hwfn,
+	       qed_ptt_config_addr(p_ptt) +
+	       offsetof(struct pxp_ptt_entry, pretend),
+	       *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u8 port_id)
+{
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+	p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+	REG_WR(p_hwfn,
+	       qed_ptt_config_addr(p_ptt) +
+	       offsetof(struct pxp_ptt_entry, pretend),
+	       *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt)
+{
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+	p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+	REG_WR(p_hwfn,
+	       qed_ptt_config_addr(p_ptt) +
+	       offsetof(struct pxp_ptt_entry, pretend),
+	       *(u32 *)&p_ptt->pxp.pretend);
+}
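+
+/* Minimal usage sketch (illustrative only, not called by the driver;
+ * the register offset parameter is hypothetical): access a register as
+ * port 1, then cancel the pretend.
+ */
+static void __maybe_unused qed_port_pretend_example(struct qed_hwfn *p_hwfn,
+						    struct qed_ptt *p_ptt,
+						    u32 hypothetical_reg)
+{
+	/* aim subsequent accesses through this ptt at port 1 */
+	qed_port_pretend(p_hwfn, p_ptt, 1);
+	qed_wr(p_hwfn, p_ptt, hypothetical_reg, 0);
+	/* restore the ptt to its original port */
+	qed_port_unpretend(p_hwfn, p_ptt);
+}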
+
+/* DMAE */
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+			    const u8 is_src_type_grc,
+			    const u8 is_dst_type_grc,
+			    struct qed_dmae_params *p_params)
+{
+	u32 opcode = 0;
+	u16 opcodeB = 0;
+
+	/* Whether the source is the PCIe or the GRC.
+	 * 0- The source is the PCIe
+	 * 1- The source is the GRC.
+	 */
+	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+				   : DMAE_CMD_SRC_MASK_PCIE) <<
+		   DMAE_CMD_SRC_SHIFT;
+	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+		   DMAE_CMD_SRC_PF_ID_SHIFT);
+
+	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+				   : DMAE_CMD_DST_MASK_PCIE) <<
+		   DMAE_CMD_DST_SHIFT;
+	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+		   DMAE_CMD_DST_PF_ID_SHIFT);
+
+	/* Whether to write a completion word to the completion destination:
+	 * 0-Do not write a completion word
+	 * 1-Write the completion word
+	 */
+	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
+	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
+		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
+
+	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
+
+	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);
+
+	/* reset source address in next go */
+	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
+		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);
+
+	/* reset dest address in next go */
+	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
+		   DMAE_CMD_DST_ADDR_RESET_SHIFT);
+
+	opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
+		    DMAE_CMD_SRC_VF_ID_SHIFT);
+
+	opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
+		    DMAE_CMD_DST_VF_ID_SHIFT);
+
+	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+	/* All the DMAE 'go' registers form an array in internal memory */
+	return DMAE_REG_GO_C0 + (idx << 2);
+}
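+
+/* Example (illustrative): the 'go' registers are consecutive u32s, so
+ * channel 3 posts through DMAE_REG_GO_C0 + (3 << 2), i.e. 12 bytes past
+ * the first register.
+ */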
+
+static int
+qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt)
+{
+	struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
+	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+	int qed_status = 0;
+
+	/* verify address is not NULL */
+	if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
+	     ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
+		DP_NOTICE(p_hwfn,
+			  "source or destination address 0 idx_cmd=%d\n"
+			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+			   idx_cmd,
+			   le32_to_cpu(command->opcode),
+			   le16_to_cpu(command->opcode_b),
+			   le16_to_cpu(command->length),
+			   le32_to_cpu(command->src_addr_hi),
+			   le32_to_cpu(command->src_addr_lo),
+			   le32_to_cpu(command->dst_addr_hi),
+			   le32_to_cpu(command->dst_addr_lo));
+
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn,
+		   NETIF_MSG_HW,
+		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+		   idx_cmd,
+		   le32_to_cpu(command->opcode),
+		   le16_to_cpu(command->opcode_b),
+		   le16_to_cpu(command->length),
+		   le32_to_cpu(command->src_addr_hi),
+		   le32_to_cpu(command->src_addr_lo),
+		   le32_to_cpu(command->dst_addr_hi),
+		   le32_to_cpu(command->dst_addr_lo));
+
+	/* Copy the command to DMAE - this must be done before every call
+	 * since the source/dest addresses are not reset.
+	 * The first 9 DWs are the command registers, the 10th DW is the
+	 * GO register, and the rest are result registers
+	 * (which are read only by the client).
+	 */
+	for (i = 0; i < DMAE_CMD_SIZE; i++) {
+		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+			   *(((u32 *)command) + i) : 0;
+
+		qed_wr(p_hwfn, p_ptt,
+		       DMAE_REG_CMD_MEM +
+		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+		       (i * sizeof(u32)), data);
+	}
+
+	qed_wr(p_hwfn, p_ptt,
+	       qed_dmae_idx_to_go_cmd(idx_cmd),
+	       DMAE_GO_VALUE);
+
+	return qed_status;
+}
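+
+/* Illustrative arithmetic for the copy loop above: with DMAE_CMD_SIZE
+ * == 14, channel idx_cmd == 2 writes its command image to
+ *
+ *   DMAE_REG_CMD_MEM + 2 * 14 * sizeof(u32) + i * sizeof(u32)
+ *
+ * for i in [0, 13], zero-filling the DWs past DMAE_CMD_SIZE_TO_FILL.
+ */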
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				     sizeof(u32),
+				     p_addr,
+				     GFP_KERNEL);
+	if (!*p_comp) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
+		goto err;
+	}
+
+	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    sizeof(struct dmae_cmd),
+				    p_addr, GFP_KERNEL);
+	if (!*p_cmd) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
+		goto err;
+	}
+
+	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				     sizeof(u32) * DMAE_MAX_RW_SIZE,
+				     p_addr, GFP_KERNEL);
+	if (!*p_buff) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
+		goto err;
+	}
+
+	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+	return 0;
+err:
+	qed_dmae_info_free(p_hwfn);
+	return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+	dma_addr_t p_phys;
+
+	/* Just make sure no one is in the middle */
+	mutex_lock(&p_hwfn->dmae_info.mutex);
+
+	if (p_hwfn->dmae_info.p_completion_word) {
+		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  sizeof(u32),
+				  p_hwfn->dmae_info.p_completion_word,
+				  p_phys);
+		p_hwfn->dmae_info.p_completion_word = NULL;
+	}
+
+	if (p_hwfn->dmae_info.p_dmae_cmd) {
+		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  sizeof(struct dmae_cmd),
+				  p_hwfn->dmae_info.p_dmae_cmd,
+				  p_phys);
+		p_hwfn->dmae_info.p_dmae_cmd = NULL;
+	}
+
+	if (p_hwfn->dmae_info.p_intermediate_buffer) {
+		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  sizeof(u32) * DMAE_MAX_RW_SIZE,
+				  p_hwfn->dmae_info.p_intermediate_buffer,
+				  p_phys);
+		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+	}
+
+	mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+	u32 wait_cnt = 0;
+	u32 wait_cnt_limit = 10000;
+
+	int qed_status = 0;
+
+	barrier();
+	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+		udelay(DMAE_MIN_WAIT_TIME);
+		if (++wait_cnt > wait_cnt_limit) {
+			DP_NOTICE(p_hwfn->cdev,
+				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+				  *p_hwfn->dmae_info.p_completion_word,
+				 DMAE_COMPLETION_VAL);
+			qed_status = -EBUSY;
+			break;
+		}
+
+		/* to sync the completion_word since we are not
+		 * using the volatile keyword for p_completion_word
+		 */
+		barrier();
+	}
+
+	if (qed_status == 0)
+		*p_hwfn->dmae_info.p_completion_word = 0;
+
+	return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+					  struct qed_ptt *p_ptt,
+					  u64 src_addr,
+					  u64 dst_addr,
+					  u8 src_type,
+					  u8 dst_type,
+					  u32 length)
+{
+	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+	int qed_status = 0;
+
+	switch (src_type) {
+	case QED_DMAE_ADDRESS_GRC:
+	case QED_DMAE_ADDRESS_HOST_PHYS:
+		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+		break;
+	/* for virtual source addresses we use the intermediate buffer. */
+	case QED_DMAE_ADDRESS_HOST_VIRT:
+		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+		       (void *)(uintptr_t)src_addr,
+		       length * sizeof(u32));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (dst_type) {
+	case QED_DMAE_ADDRESS_GRC:
+	case QED_DMAE_ADDRESS_HOST_PHYS:
+		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+		break;
+	/* for virtual destination addresses we use the intermediate buffer. */
+	case QED_DMAE_ADDRESS_HOST_VIRT:
+		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	cmd->length = cpu_to_le16((u16)length);
+
+	qed_dmae_post_command(p_hwfn, p_ptt);
+
+	qed_status = qed_dmae_operation_wait(p_hwfn);
+
+	if (qed_status) {
+		DP_NOTICE(p_hwfn,
+			  "qed_dmae_execute_sub_operation: wait failed. source_addr 0x%llx, destination_addr 0x%llx, size_in_dwords 0x%x\n",
+			  src_addr,
+			  dst_addr,
+			  length);
+		return qed_status;
+	}
+
+	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+		memcpy((void *)(uintptr_t)(dst_addr),
+		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
+		       length * sizeof(u32));
+
+	return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+				    struct qed_ptt *p_ptt,
+				    u64 src_addr, u64 dst_addr,
+				    u8 src_type, u8 dst_type,
+				    u32 size_in_dwords,
+				    struct qed_dmae_params *p_params)
+{
+	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+	u64 src_addr_split = 0, dst_addr_split = 0;
+	u16 length_limit = DMAE_MAX_RW_SIZE;
+	int qed_status = 0;
+	u32 offset = 0;
+
+	qed_dmae_opcode(p_hwfn,
+			(src_type == QED_DMAE_ADDRESS_GRC),
+			(dst_type == QED_DMAE_ADDRESS_GRC),
+			p_params);
+
+	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+	/* Check that the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
+	cnt_split = size_in_dwords / length_limit;
+	length_mod = size_in_dwords % length_limit;
+
+	src_addr_split = src_addr;
+	dst_addr_split = dst_addr;
+
+	for (i = 0; i <= cnt_split; i++) {
+		offset = length_limit * i;
+
+		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
+			if (src_type == QED_DMAE_ADDRESS_GRC)
+				src_addr_split = src_addr + offset;
+			else
+				src_addr_split = src_addr + (offset * 4);
+		}
+
+		if (dst_type == QED_DMAE_ADDRESS_GRC)
+			dst_addr_split = dst_addr + offset;
+		else
+			dst_addr_split = dst_addr + (offset * 4);
+
+		length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+		/* might be zero on last iteration */
+		if (!length_cur)
+			continue;
+
+		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+							    p_ptt,
+							    src_addr_split,
+							    dst_addr_split,
+							    src_type,
+							    dst_type,
+							    length_cur);
+		if (qed_status) {
+			DP_NOTICE(p_hwfn,
+				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+				  qed_status,
+				  src_addr,
+				  dst_addr,
+				  length_cur);
+			break;
+		}
+	}
+
+	return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u64 source_addr,
+		      u32 grc_addr,
+		      u32 size_in_dwords,
+		      u32 flags)
+{
+	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+	struct qed_dmae_params params;
+	int rc;
+
+	memset(&params, 0, sizeof(struct qed_dmae_params));
+	params.flags = flags;
+
+	mutex_lock(&p_hwfn->dmae_info.mutex);
+
+	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+				      grc_addr_in_dw,
+				      QED_DMAE_ADDRESS_HOST_VIRT,
+				      QED_DMAE_ADDRESS_GRC,
+				      size_in_dwords, &params);
+
+	mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+	return rc;
+}
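+
+/* Usage sketch (illustrative only, not called by the driver; the GRC
+ * offset is supplied by the caller): DMA a 16-dword host buffer to GRC.
+ */
+static int __maybe_unused qed_dmae_host2grc_example(struct qed_hwfn *p_hwfn,
+						    struct qed_ptt *p_ptt,
+						    u32 *buf, u32 grc_addr)
+{
+	/* the source is a host virtual address, so the DMAE code stages
+	 * it through the intermediate buffer
+	 */
+	return qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
+				 grc_addr, 16, 0);
+}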
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+		  enum protocol_type proto,
+		  union qed_qm_pq_params *p_params)
+{
+	u16 pq_id = 0;
+
+	if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
+	    !p_params) {
+		DP_NOTICE(p_hwfn,
+			  "Protocol %d received NULL PQ params\n",
+			  proto);
+		return 0;
+	}
+
+	switch (proto) {
+	case PROTOCOLID_CORE:
+		if (p_params->core.tc == LB_TC)
+			pq_id = p_hwfn->qm_info.pure_lb_pq;
+		else
+			pq_id = p_hwfn->qm_info.offload_pq;
+		break;
+	case PROTOCOLID_ETH:
+		pq_id = p_params->eth.tc;
+		break;
+	default:
+		pq_id = 0;
+	}
+
+	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
+
+	return pq_id;
+}
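+
+/* Usage sketch (illustrative only, not called by the driver): resolve
+ * the physical queue ID for an Ethernet queue on traffic class 0.
+ */
+static u16 __maybe_unused qed_get_qm_pq_example(struct qed_hwfn *p_hwfn)
+{
+	union qed_qm_pq_params pq_params;
+
+	memset(&pq_params, 0, sizeof(pq_params));
+	pq_params.eth.tc = 0;
+	return qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+}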
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 0000000..e56d433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,263 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+	RESERVED_PTT_EDIAG,
+	RESERVED_PTT_USER_SPACE,
+	RESERVED_PTT_MAIN,
+	RESERVED_PTT_DPC,
+	RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+	DMAE_CMD_DST_MASK_NONE	= 0,
+	DMAE_CMD_DST_MASK_PCIE	= 1,
+	DMAE_CMD_DST_MASK_GRC	= 2
+};
+
+enum _dmae_cmd_src_mask {
+	DMAE_CMD_SRC_MASK_PCIE	= 0,
+	DMAE_CMD_SRC_MASK_GRC	= 1
+};
+
+enum _dmae_cmd_crc_mask {
+	DMAE_CMD_COMP_CRC_EN_MASK_NONE	= 0,
+	DMAE_CMD_COMP_CRC_EN_MASK_SET	= 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE   0x1
+
+#define DMAE_COMPLETION_VAL     0xD1AE
+#define DMAE_CMD_ENDIANITY      0x2
+
+#define DMAE_CMD_SIZE   14
+#define DMAE_CMD_SIZE_TO_FILL   (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME      0x2
+#define DMAE_MAX_CLIENTS        32
+
+/**
+ * @brief qed_gtt_init - Initialize GTT windows
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_pool_free - Free the PTT pool
+ *
+ * @param p_hwfn
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_get_bar_addr - Get PTT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u32 new_hw_addr);
+
+/**
+ * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct qed_ptt *
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+				     enum reserved_ptts ptt_idx);
+
+/**
+ * @brief qed_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param val
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+	    struct qed_ptt *p_ptt,
+	    u32 hw_addr,
+	    u32 val);
+
+/**
+ * @brief qed_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ *
+ * @return u32
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+	   struct qed_ptt *p_ptt,
+	   u32 hw_addr);
+
+/**
+ * @brief qed_memcpy_from - copy n bytes from BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     void *dest,
+		     u32 hw_addr,
+		     size_t n);
+
+/**
+ * @brief qed_memcpy_to - copy n bytes to BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   u32 hw_addr,
+		   void *src,
+		   size_t n);
+
+/**
+ * @brief qed_fid_pretend - pretend to another function when
+ *        accessing the ptt window. There is no way to unpretend
+ *        a function. The only way to cancel a pretend is to
+ *        pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ *            either a pf or a vf; port/path fields are don't care.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u16 fid);
+
+/**
+ * @brief qed_port_pretend - pretend to another port when
+ *        accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u8 port_id);
+
+/**
+ * @brief qed_port_unpretend - cancel any previously set port
+ *        pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_dmae_idx_to_go_cmd - map the idx to the dmae 'go' command
+ * address. Declared here since other files require it.
+ *
+ * @param idx
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * @brief qed_dmae_info_alloc - Init the dmae_info structure
+ * which is part of p_hwfn.
+ * @param p_hwfn
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_dmae_info_free - Free the dmae_info structure
+ * which is part of p_hwfn
+ *
+ * @param p_hwfn
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+	struct {
+		u8 tc;
+	}	core;
+
+	struct {
+		u8	is_vf;
+		u8	vf_id;
+		u8	tc;
+	}	eth;
+};
+
+u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
+		  enum protocol_type proto,
+		  union qed_qm_pq_params *params);
+
+int qed_init_fw_data(struct qed_dev *cdev,
+		     const u8 *fw_data);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 0000000..0b21a55
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,798 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+enum cminterface {
+	MCM_SEC,
+	MCM_PRI,
+	UCM_SEC,
+	UCM_PRI,
+	TCM_SEC,
+	TCM_PRI,
+	YCM_SEC,
+	YCM_PRI,
+	XCM_SEC,
+	XCM_PRI,
+	NUM_OF_CM_INTERFACES
+};
+
+/* general constants */
+#define QM_PQ_ELEMENT_SIZE                      4 /* in bytes */
+#define QM_PQ_MEM_4KB(pq_size)	(pq_size ? DIV_ROUND_UP((pq_size + 1) *	\
+							QM_PQ_ELEMENT_SIZE, \
+							0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size)	(pq_size ? DIV_ROUND_UP(pq_size, \
+								0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID                        0xffff
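+/* Worked example (illustrative): for a PQ with 255 CIDs,
+ *   QM_PQ_MEM_4KB(255)   = DIV_ROUND_UP(256 * 4, 0x1000) = 1 4KB page
+ *   QM_PQ_SIZE_256B(255) = DIV_ROUND_UP(255, 0x100) - 1  = 0
+ * i.e. one 4KB page of PQ memory and a size-register value of 0.
+ */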
+/* feature enable */
+#define QM_BYPASS_EN                            1
+#define QM_BYTE_CRD_EN                          1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF                     4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND		6250000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT          0
+#define QM_WFQ_VP_PQ_PF_SHIFT           5
+#define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL                      4375000
+#define QM_WFQ_INIT_CRD(inc_val)        (2 * (inc_val))
+/* RL constants */
+#define QM_RL_UPPER_BOUND                       6250000
+#define QM_RL_PERIOD                            5               /* in us */
+#define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate)		max_t(u32,	\
+					      (((rate ? rate : 1000000)	\
+						* QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL                       4375000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF           1
+#define QM_OPPOR_FW_STOP_DEF            0
+#define QM_OPPOR_PQ_EMPTY_DEF           1
+#define EAGLE_WORKAROUND_TC                     7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES                          150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES         8
+#define PBF_CMDQ_LINES_RT_OFFSET(voq)           (		 \
+		PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+		(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET -	 \
+		 PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq)       (	      \
+		PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+		(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET -      \
+		 PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines)          ((((pbf_cmd_lines) - \
+						   4) *		     \
+						  2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS            38
+#define BTB_HEADROOM_BLOCKS                     BTB_JUMBO_PKT_BLOCKS
+#define BTB_EAGLE_WORKAROUND_BLOCKS     4
+#define BTB_PURE_LB_FACTOR                      10
+#define BTB_PURE_LB_RATIO                       7
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH                   32
+#define QM_STOP_CMD_ADDR                                0x2
+#define QM_STOP_CMD_STRUCT_SIZE                 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
+#define QM_STOP_CMD_PAUSE_MASK_MASK             -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET             1
+#define QM_STOP_CMD_GROUP_ID_SHIFT              16
+#define QM_STOP_CMD_GROUP_ID_MASK               15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET              1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT               24
+#define QM_STOP_CMD_PQ_TYPE_MASK                1
+#define QM_STOP_CMD_MAX_POLL_COUNT              100
+#define QM_STOP_CMD_POLL_PERIOD_US              500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd)	cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+		  cmd ## _ ## field, value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) \
+	((port) * (max_phy_tcs_pr_port) + (tc))
+#define LB_VOQ(port)		(MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phy_tcs_pr_port) \
+	((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phy_tcs_pr_port) : \
+	 LB_VOQ(port))
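+/* Worked example (illustrative, assuming LB_TC, defined elsewhere, is
+ * above 2): with max_phy_tcs_pr_port == 4, VOQ(1, 2, 4) resolves to
+ * PHYS_VOQ(1, 2, 4) == 1 * 4 + 2 == 6, while the pure-LB class maps to
+ * LB_VOQ(1) == MAX_PHYS_VOQS + 1.
+ */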
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
+			     bool pf_rl_en)
+{
+	STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+	if (pf_rl_en) {
+		/* enable RLs for all VOQs */
+		STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+			     (1 << MAX_NUM_VOQS) - 1);
+		/* write RL period */
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLPFPERIOD_RT_OFFSET,
+			     QM_RL_PERIOD_CLK_25M);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+			     QM_RL_PERIOD_CLK_25M);
+		/* set credit threshold for QM bypass flow */
+		if (QM_BYPASS_EN)
+			STORE_RT_REG(p_hwfn,
+				     QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+				     QM_RL_UPPER_BOUND);
+	}
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
+			      bool pf_wfq_en)
+{
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+	/* set credit threshold for QM bypass flow */
+	if (pf_wfq_en && QM_BYPASS_EN)
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+			     QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
+				bool vport_rl_en)
+{
+	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+		     vport_rl_en ? 1 : 0);
+	if (vport_rl_en) {
+		/* write RL period (use timer 0 only) */
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+			     QM_RL_PERIOD_CLK_25M);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+			     QM_RL_PERIOD_CLK_25M);
+		/* set credit threshold for QM bypass flow */
+		if (QM_BYPASS_EN)
+			STORE_RT_REG(p_hwfn,
+				     QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+				     QM_RL_UPPER_BOUND);
+	}
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
+				 bool vport_wfq_en)
+{
+	STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+		     vport_wfq_en ? 1 : 0);
+	/* set credit threshold for QM bypass flow */
+	if (vport_wfq_en && QM_BYPASS_EN)
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+			     QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+				       u8 voq,
+				       u16 cmdq_lines)
+{
+	u32 qm_line_crd;
+
+	/* In A0, limit the size of the pbf queue so that at most 511
+	 * commands with the minimum size of 4 (the FCoE minimum size)
+	 * can be pending.
+	 */
+	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+
+	if (is_bb_a0)
+		cmdq_lines = min_t(u32, cmdq_lines, 1022);
+	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+			 (u32)cmdq_lines);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+	STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+		     qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void qed_cmdq_lines_rt_init(
+	struct qed_hwfn *p_hwfn,
+	u8 max_ports_per_engine,
+	u8 max_phys_tcs_per_port,
+	struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+	u8 tc, voq, port_id;
+
+	/* clear PBF lines for all VOQs */
+	for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+		STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+		if (port_params[port_id].active) {
+			u16 phys_lines, phys_lines_per_tc;
+			u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+			/* find #lines to divide between the active
+			 * physical TCs.
+			 */
+			phys_lines = port_params[port_id].num_pbf_cmd_lines -
+				     PBF_CMDQ_PURE_LB_LINES;
+			/* find #lines per active physical TC */
+			phys_lines_per_tc = phys_lines / phys_tcs;
+			/* init registers per active TC */
+			for (tc = 0; tc < phys_tcs; tc++) {
+				voq = PHYS_VOQ(port_id, tc,
+					       max_phys_tcs_per_port);
+				qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
+							   phys_lines_per_tc);
+			}
+			/* init registers for pure LB TC */
+			qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+						   PBF_CMDQ_PURE_LB_LINES);
+		}
+	}
+}
+
+static void qed_btb_blocks_rt_init(
+	struct qed_hwfn *p_hwfn,
+	u8 max_ports_per_engine,
+	u8 max_phys_tcs_per_port,
+	struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+	u32 usable_blocks, pure_lb_blocks, phys_blocks;
+	u8 tc, voq, port_id;
+
+	for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+		u32 temp;
+		u8 phys_tcs;
+
+		if (!port_params[port_id].active)
+			continue;
+
+		phys_tcs = port_params[port_id].num_active_phys_tcs;
+
+		/* subtract headroom blocks */
+		usable_blocks = port_params[port_id].num_btb_blocks -
+				BTB_HEADROOM_BLOCKS;
+
+		/* find blocks per physical TC. use factor to avoid
+		 * floating point arithmetic.
+		 */
+		pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+				 (phys_tcs * BTB_PURE_LB_FACTOR +
+				  BTB_PURE_LB_RATIO);
+		pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+				       pure_lb_blocks / BTB_PURE_LB_FACTOR);
+		phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+
+		/* init physical TCs */
+		for (tc = 0; tc < phys_tcs; tc++) {
+			voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+			STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+				     phys_blocks);
+		}
+
+		/* init pure LB TC */
+		temp = LB_VOQ(port_id);
+		STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp),
+			     pure_lb_blocks);
+	}
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void qed_tx_pq_map_rt_init(
+	struct qed_hwfn *p_hwfn,
+	struct qed_ptt *p_ptt,
+	struct qed_qm_pf_rt_init_params *p_params,
+	u32 base_mem_addr_4kb)
+{
+	struct init_qm_vport_params *vport_params = p_params->vport_params;
+	u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
+			    QM_PF_QUEUE_GROUP_SIZE;
+	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
+	u16 i, pq_id, pq_group;
+
+	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
+	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+	u32 mem_addr_4kb = base_mem_addr_4kb;
+
+	/* set mapping from PQ group to PF */
+	for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+		STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+			     (u32)(p_params->pf_id));
+	/* set PQ sizes */
+	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+		     QM_PQ_SIZE_256B(p_params->num_pf_cids));
+	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+		     QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+	/* go over all Tx PQs */
+	for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+			     p_params->max_phys_tcs_per_port);
+		bool is_vf_pq = (i >= p_params->num_pf_pqs);
+		struct qm_rf_pq_map tx_pq_map;
+
+		/* update first Tx PQ of VPORT/TC */
+		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
+				    p_params->start_vport;
+		u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0];
+		u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id];
+
+		if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+			/* create new VP PQ */
+			pq_ids[p_params->pq_params[i].tc_id] = pq_id;
+			first_tx_pq_id = pq_id;
+			/* map VP PQ to VOQ and PF */
+			STORE_RT_REG(p_hwfn,
+				     QM_REG_WFQVPMAP_RT_OFFSET +
+				     first_tx_pq_id,
+				     (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+				     (p_params->pf_id <<
+				      QM_WFQ_VP_PQ_PF_SHIFT));
+		}
+		/* fill PQ map entry */
+		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+			  is_vf_pq ? 1 : 0);
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+			  is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+			  p_params->pq_params[i].wrr_group);
+		/* write PQ map entry to CAM */
+		STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+			     *((u32 *)&tx_pq_map));
+		/* set base address */
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+			     mem_addr_4kb);
+		/* check if VF PQ */
+		if (is_vf_pq) {
+			/* if PQ is associated with a VF, add indication
+			 * to PQ VF mask
+			 */
+			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+				(1 << (pq_id % tx_pq_vf_mask_width));
+			mem_addr_4kb += vport_pq_mem_4kb;
+		} else {
+			mem_addr_4kb += pq_mem_4kb;
+		}
+	}
+
+	/* store Tx PQ VF mask to size select register */
+	for (i = 0; i < num_tx_pq_vf_masks; i++) {
+		if (tx_pq_vf_mask[i]) {
+			if (is_bb_a0) {
+				u32 curr_mask = 0, addr;
+
+				addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
+				if (!p_params->is_first_pf)
+					curr_mask = qed_rd(p_hwfn, p_ptt,
+							   addr);
+
+				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+
+				STORE_RT_REG(p_hwfn, addr,
+					     curr_mask | tx_pq_vf_mask[i]);
+			} else {
+				u32 addr;
+
+				addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+				STORE_RT_REG(p_hwfn, addr,
+					     tx_pq_vf_mask[i]);
+			}
+		}
+	}
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+				     u8 port_id,
+				     u8 pf_id,
+				     u32 num_pf_cids,
+				     u32 num_tids,
+				     u32 base_mem_addr_4kb)
+{
+	u16 i, pq_id;
+
+	/* a single other PQ group is used in each PF,
+	 * where PQ group i is used in PF i.
+	 */
+	u16 pq_group = pf_id;
+	u32 pq_size = num_pf_cids + num_tids;
+	u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+	u32 mem_addr_4kb = base_mem_addr_4kb;
+
+	/* map PQ group to PF */
+	STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+		     (u32)(pf_id));
+	/* set PQ sizes */
+	STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+		     QM_PQ_SIZE_256B(pq_size));
+	/* set base address */
+	for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+	     i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+			     mem_addr_4kb);
+		mem_addr_4kb += pq_mem_4kb;
+	}
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+			      struct qed_qm_pf_rt_init_params *p_params)
+{
+	u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+	u32 crd_reg_offset;
+	u32 inc_val;
+	u16 i;
+
+	if (p_params->pf_id < MAX_NUM_PFS_BB)
+		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
+	else
+		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
+				 (p_params->pf_id % MAX_NUM_PFS_BB);
+
+	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
+	if (inc_val > QM_WFQ_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+		return -1;
+	}
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+		     inc_val);
+	STORE_RT_REG(p_hwfn,
+		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+
+	for (i = 0; i < num_tx_pqs; i++) {
+		u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
+			     p_params->max_phys_tcs_per_port);
+
+		OVERWRITE_RT_REG(p_hwfn,
+				 crd_reg_offset + voq * MAX_NUM_PFS_BB,
+				 QM_WFQ_INIT_CRD(inc_val) |
+				 QM_WFQ_CRD_REG_SIGN_BIT);
+	}
+
+	return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
+			     u8 pf_id,
+			     u32 pf_rl)
+{
+	u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+	if (inc_val > QM_RL_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+		return -1;
+	}
+	STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+		     QM_RL_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+		     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+	return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+			      u8 start_vport,
+			      u8 num_vports,
+			      struct init_qm_vport_params *vport_params)
+{
+	u8 tc, i, vport_id;
+	u32 inc_val;
+
+	/* go over all PF VPORTs */
+	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+		u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET;
+		u16 *pq_ids = &vport_params[i].first_tx_pq_id[0];
+
+		if (!vport_params[i].vport_wfq)
+			continue;
+
+		inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+		if (inc_val > QM_WFQ_MAX_INC_VAL) {
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT WFQ weight configuration");
+			return -1;
+		}
+
+		/* each VPORT can have several VPORT PQ IDs for
+		 * different TCs
+		 */
+		for (tc = 0; tc < NUM_OF_TCS; tc++) {
+			u16 vport_pq_id = pq_ids[tc];
+
+			if (vport_pq_id != QM_INVALID_PQ_ID) {
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_WFQVPWEIGHT_RT_OFFSET +
+					     vport_pq_id, inc_val);
+				STORE_RT_REG(p_hwfn, temp + vport_pq_id,
+					     QM_WFQ_UPPER_BOUND |
+					     QM_WFQ_CRD_REG_SIGN_BIT);
+				STORE_RT_REG(p_hwfn,
+					     QM_REG_WFQVPCRD_RT_OFFSET +
+					     vport_pq_id,
+					     QM_WFQ_INIT_CRD(inc_val) |
+					     QM_WFQ_CRD_REG_SIGN_BIT);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+				u8 start_vport,
+				u8 num_vports,
+				struct init_qm_vport_params *vport_params)
+{
+	u8 i, vport_id;
+
+	/* go over all PF VPORTs */
+	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+
+		if (inc_val > QM_RL_MAX_INC_VAL) {
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT rate-limit configuration");
+			return -1;
+		}
+
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+			     QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+			     QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+		STORE_RT_REG(p_hwfn,
+			     QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+			     inc_val);
+	}
+
+	return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt)
+{
+	u32 reg_val, i;
+
+	for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+	     i++) {
+		udelay(QM_STOP_CMD_POLL_PERIOD_US);
+		reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+	}
+
+	/* check if we timed out while waiting for SDM command ready */
+	if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+			   "Timeout when waiting for QM SDM command ready signal\n");
+		return false;
+	}
+
+	return true;
+}
+
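+/* SDM command handshake: wait for the ready indication, write the command
+ * address and 64 bits of command data, pulse the GO register, then wait
+ * for ready again to confirm the command was consumed.
+ */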
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    u32 cmd_addr,
+			    u32 cmd_data_lsb,
+			    u32 cmd_data_msb)
+{
+	if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+		return false;
+
+	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+	qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+	return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
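+/* Sum of the PQ memory needed by this PF: one buffer per PF Tx PQ (sized
+ * from num_pf_cids), one per VF Tx PQ (sized from num_vf_cids), plus
+ * QM_OTHER_PQS_PER_PF "other" PQs (sized from num_pf_cids + num_tids),
+ * all in the 4KB units produced by QM_PQ_MEM_4KB().
+ */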
+u32 qed_qm_pf_mem_size(u8 pf_id,
+		       u32 num_pf_cids,
+		       u32 num_vf_cids,
+		       u32 num_tids,
+		       u16 num_pf_pqs,
+		       u16 num_vf_pqs)
+{
+	return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+	       QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+	       QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+			  struct qed_qm_common_rt_init_params *p_params)
+{
+	/* init AFullOprtnstcCrdMask */
+	u32 mask = (QM_OPPOR_LINE_VOQ_DEF <<
+		    QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+		   (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+		   (p_params->pf_wfq_en <<
+		    QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+		   (p_params->vport_wfq_en <<
+		    QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+		   (p_params->pf_rl_en <<
+		    QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+		   (p_params->vport_rl_en <<
+		    QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+		   (QM_OPPOR_FW_STOP_DEF <<
+		    QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+		   (QM_OPPOR_PQ_EMPTY_DEF <<
+		    QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+
+	STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+	qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+	qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+	qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en);
+	qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+	qed_cmdq_lines_rt_init(p_hwfn,
+			       p_params->max_ports_per_engine,
+			       p_params->max_phys_tcs_per_port,
+			       p_params->port_params);
+	qed_btb_blocks_rt_init(p_hwfn,
+			       p_params->max_ports_per_engine,
+			       p_params->max_phys_tcs_per_port,
+			       p_params->port_params);
+	return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      struct qed_qm_pf_rt_init_params *p_params)
+{
+	struct init_qm_vport_params *vport_params = p_params->vport_params;
+	u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+					       p_params->num_tids) *
+				 QM_OTHER_PQS_PER_PF;
+	u8 tc, i;
+
+	/* clear first Tx PQ ID array for each VPORT */
+	for (i = 0; i < p_params->num_vports; i++)
+		for (tc = 0; tc < NUM_OF_TCS; tc++)
+			vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+	/* map Other PQs (if any) */
+	qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id,
+				 p_params->num_pf_cids, p_params->num_tids, 0);
+
+	/* map Tx PQs */
+	qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+
+	if (p_params->pf_wfq)
+		if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+			return -1;
+
+	if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+		return -1;
+
+	if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport,
+			       p_params->num_vports, vport_params))
+		return -1;
+
+	if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport,
+				 p_params->num_vports, vport_params))
+		return -1;
+
+	return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   u8 pf_id,
+		   u32 pf_rl)
+{
+	u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+	if (inc_val > QM_RL_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration");
+		return -1;
+	}
+
+	qed_wr(p_hwfn, p_ptt,
+	       QM_REG_RLPFCRD + pf_id * 4,
+	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+	return 0;
+}
+
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      u8 vport_id,
+		      u32 vport_rl)
+{
+	u32 inc_val = QM_RL_INC_VAL(vport_rl);
+
+	if (inc_val > QM_RL_MAX_INC_VAL) {
+		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
+		return -1;
+	}
+
+	qed_wr(p_hwfn, p_ptt,
+	       QM_REG_RLGLBLCRD + vport_id * 4,
+	       QM_RL_CRD_REG_SIGN_BIT);
+	qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
+	return 0;
+}
+
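+/* Stop/release PQs in groups of QM_STOP_PQ_MASK_WIDTH: a bit is set in the
+ * mask for every PQ to stop, and the accumulated mask is flushed as a
+ * command at each group boundary and at the last PQ. As an illustration
+ * only, if the mask width were 32, stopping PQs 30..34 would issue two
+ * commands: group 0 with bits 30-31 set, then group 1 with bits 0-2 set.
+ */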
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  bool is_release_cmd,
+			  bool is_tx_pq,
+			  u16 start_pq,
+			  u16 num_pqs)
+{
+	u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+	u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
+
+	/* set command's PQ type */
+	QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+		/* set PQ bit in mask (stop command only) */
+		if (!is_release_cmd)
+			pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+		/* if last PQ or end of PQ mask, write command */
+		if ((pq_id == last_pq) ||
+		    (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+		     (QM_STOP_PQ_MASK_WIDTH - 1))) {
+			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+					 PAUSE_MASK, pq_mask);
+			QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD,
+					 GROUP_ID,
+					 pq_id / QM_STOP_PQ_MASK_WIDTH);
+			if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+					     cmd_arr[0], cmd_arr[1]))
+				return false;
+			pq_mask = 0;
+		}
+	}
+
+	return true;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 0000000..796f139
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,531 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_reg_addr.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+	0,
+	0,
+	0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+	0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+	0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+	0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+	0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+	0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+	0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+	0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+	0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+	0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+	0,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+	cdev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
+{
+	int i;
+
+	for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+		p_hwfn->rt_data[i].b_valid = false;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+			   u32 rt_offset,
+			   u32 val)
+{
+	p_hwfn->rt_data[rt_offset].init_val = val;
+	p_hwfn->rt_data[rt_offset].b_valid = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+			   u32 rt_offset,
+			   u32 *val,
+			   size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size / sizeof(u32); i++) {
+		p_hwfn->rt_data[rt_offset + i].init_val = val[i];
+		p_hwfn->rt_data[rt_offset + i].b_valid = true;
+	}
+}
+
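+/* Flush a window of the runtime array to hardware. Only slots explicitly
+ * stored via qed_init_store_rt_reg()/qed_init_store_rt_agg() (b_valid set)
+ * are written; all other slots are skipped.
+ */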
+static void qed_init_rt(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			u32 addr,
+			u32 rt_offset,
+			u32 size)
+{
+	struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
+	u32 i;
+
+	for (i = 0; i < size; i++) {
+		if (!rt_data[i].b_valid)
+			continue;
+		qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
+	}
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_rt_data *rt_data;
+
+	rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
+	if (!rt_data)
+		return -ENOMEM;
+
+	p_hwfn->rt_data = rt_data;
+
+	return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+	kfree(p_hwfn->rt_data);
+	p_hwfn->rt_data = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt,
+			       u32 addr,
+			       u32 dmae_data_offset,
+			       u32 size,
+			       const u32 *buf,
+			       bool b_must_dmae,
+			       bool b_can_dmae)
+{
+	int rc = 0;
+
+	/* Perform DMAE only for lengthy enough sections or for wide-bus */
+	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+		const u32 *data = buf + dmae_data_offset;
+		u32 i;
+
+		for (i = 0; i < size; i++)
+			qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+	} else {
+		rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+				       (uintptr_t)(buf + dmae_data_offset),
+				       addr, size, 0);
+	}
+
+	return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      u32 addr,
+			      u32 fill,
+			      u32 fill_count)
+{
+	static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+	memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+	/* invoke the DMAE virtual/physical buffer API with
+	 * 1. DMAE init channel
+	 * 2. addr,
+	 * 3. zero_buffer (replicated as the source),
+	 * 4. fill_count
+	 */
+
+	return qed_dmae_host2grc(p_hwfn, p_ptt,
+				 (uintptr_t)(&zero_buffer[0]),
+				 addr, fill_count,
+				 QED_DMAE_FLAG_RW_REPL_SRC);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  u32 addr,
+			  u32 fill,
+			  u32 fill_count)
+{
+	u32 i;
+
+	for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+		qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      struct init_write_op *cmd,
+			      bool b_must_dmae,
+			      bool b_can_dmae)
+{
+	u32 data = le32_to_cpu(cmd->data);
+	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+	u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+	u32 offset, output_len, input_len, max_size;
+	struct qed_dev *cdev = p_hwfn->cdev;
+	union init_array_hdr *hdr;
+	const u32 *array_data;
+	int rc = 0;
+	u32 size;
+
+	array_data = cdev->fw_data->arr_data;
+
+	hdr = (union init_array_hdr *)(array_data +
+				       dmae_array_offset);
+	data = le32_to_cpu(hdr->raw.data);
+	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+	case INIT_ARR_ZIPPED:
+		offset = dmae_array_offset + 1;
+		input_len = GET_FIELD(data,
+				      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+		max_size = MAX_ZIPPED_SIZE * 4;
+		memset(p_hwfn->unzip_buf, 0, max_size);
+
+		output_len = qed_unzip_data(p_hwfn, input_len,
+					    (u8 *)&array_data[offset],
+					    max_size, (u8 *)p_hwfn->unzip_buf);
+		if (output_len) {
+			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+						 output_len,
+						 p_hwfn->unzip_buf,
+						 b_must_dmae, b_can_dmae);
+		} else {
+			DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+			rc = -EINVAL;
+		}
+		break;
+	case INIT_ARR_PATTERN:
+	{
+		u32 repeats = GET_FIELD(data,
+					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+		u32 i;
+
+		size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+		for (i = 0; i < repeats; i++, addr += size << 2) {
+			rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+						 dmae_array_offset + 1,
+						 size, array_data,
+						 b_must_dmae, b_can_dmae);
+			if (rc)
+				break;
+		}
+		break;
+	}
+	case INIT_ARR_STANDARD:
+		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+		rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+					 dmae_array_offset + 1,
+					 size, array_data,
+					 b_must_dmae, b_can_dmae);
+		break;
+	}
+
+	return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   struct init_write_op *cmd,
+			   bool b_can_dmae)
+{
+	u32 data = le32_to_cpu(cmd->data);
+	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+	bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+	union init_write_args *arg = &cmd->args;
+	int rc = 0;
+
+	/* Sanitize */
+	if (b_must_dmae && !b_can_dmae) {
+		DP_NOTICE(p_hwfn,
+			  "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+			  addr);
+		return -EINVAL;
+	}
+
+	switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+	case INIT_SRC_INLINE:
+		qed_wr(p_hwfn, p_ptt, addr,
+		       le32_to_cpu(arg->inline_val));
+		break;
+	case INIT_SRC_ZEROS:
+		if (b_must_dmae ||
+		    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
+			rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
+						le32_to_cpu(arg->zeros_count));
+		else
+			qed_init_fill(p_hwfn, p_ptt, addr, 0,
+				      le32_to_cpu(arg->zeros_count));
+		break;
+	case INIT_SRC_ARRAY:
+		rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
+					b_must_dmae, b_can_dmae);
+		break;
+	case INIT_SRC_RUNTIME:
+		qed_init_rt(p_hwfn, p_ptt, addr,
+			    le16_to_cpu(arg->runtime.offset),
+			    le16_to_cpu(arg->runtime.size));
+		break;
+	}
+
+	return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+	return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+	return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+	return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    struct init_read_op *cmd)
+{
+	u32 data = le32_to_cpu(cmd->op_data);
+	u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+
+	bool (*comp_check)(u32 val, u32 expected_val);
+	u32 delay = QED_INIT_POLL_PERIOD_US, val;
+
+	val = qed_rd(p_hwfn, p_ptt, addr);
+
+	data = le32_to_cpu(cmd->op_data);
+	if (GET_FIELD(data, INIT_READ_OP_POLL)) {
+		int i;
+
+		switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
+		case INIT_COMPARISON_EQ:
+			comp_check = comp_eq;
+			break;
+		case INIT_COMPARISON_OR:
+			comp_check = comp_or;
+			break;
+		case INIT_COMPARISON_AND:
+			comp_check = comp_and;
+			break;
+		default:
+			comp_check = NULL;
+			DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+			       data);
+			return;
+		}
+
+		for (i = 0;
+		     i < QED_INIT_MAX_POLL_COUNT &&
+		     !comp_check(val, le32_to_cpu(cmd->expected_val));
+		     i++) {
+			udelay(delay);
+			val = qed_rd(p_hwfn, p_ptt, addr);
+		}
+
+		if (i == QED_INIT_MAX_POLL_COUNT)
+			DP_ERR(p_hwfn,
+			       "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+			       addr, le32_to_cpu(cmd->expected_val),
+			       val, data);
+	}
+}
+
+/* init_ops callbacks entry point */
+static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    struct init_callback_op *p_cmd)
+{
+	DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+}
+
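+/* The modes tree is encoded in prefix (Polish) notation: a byte holding a
+ * NOT/OR/AND op applies to the expression(s) that follow it, while any
+ * other byte is a leaf that tests mode bit (tree_val - MAX_INIT_MODE_OPS).
+ * E.g. the sequence {AND, leaf(a), leaf(b)} matches only when both mode
+ * bits a and b are set.
+ */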
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+				  u16 *offset,
+				  int modes)
+{
+	struct qed_dev *cdev = p_hwfn->cdev;
+	const u8 *modes_tree_buf;
+	u8 arg1, arg2, tree_val;
+
+	modes_tree_buf = cdev->fw_data->modes_tree_buf;
+	tree_val = modes_tree_buf[(*offset)++];
+	switch (tree_val) {
+	case INIT_MODE_OP_NOT:
+		return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
+	case INIT_MODE_OP_OR:
+		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		return arg1 | arg2;
+	case INIT_MODE_OP_AND:
+		arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
+		return arg1 & arg2;
+	default:
+		tree_val -= MAX_INIT_MODE_OPS;
+		return (modes & (1 << tree_val)) ? 1 : 0;
+	}
+}
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+			     struct init_if_mode_op *p_cmd,
+			     int modes)
+{
+	u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+	if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+		return 0;
+	else
+		return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+				 INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
+			      struct init_if_phase_op *p_cmd,
+			      u32 phase,
+			      u32 phase_id)
+{
+	u32 data = le32_to_cpu(p_cmd->phase_data);
+	u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+	if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+	      (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+	       GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+		return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+	else
+		return 0;
+}
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+		 struct qed_ptt *p_ptt,
+		 int phase,
+		 int phase_id,
+		 int modes)
+{
+	struct qed_dev *cdev = p_hwfn->cdev;
+	u32 cmd_num, num_init_ops;
+	union init_op *init_ops;
+	bool b_dmae = false;
+	int rc = 0;
+
+	num_init_ops = cdev->fw_data->init_ops_size;
+	init_ops = cdev->fw_data->init_ops;
+
+	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+	if (!p_hwfn->unzip_buf) {
+		DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
+		return -ENOMEM;
+	}
+
+	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+		union init_op *cmd = &init_ops[cmd_num];
+		u32 data = le32_to_cpu(cmd->raw.op_data);
+
+		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+		case INIT_OP_WRITE:
+			rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+					     b_dmae);
+			break;
+		case INIT_OP_READ:
+			qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+			break;
+		case INIT_OP_IF_MODE:
+			cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+						     modes);
+			break;
+		case INIT_OP_IF_PHASE:
+			cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+						      phase, phase_id);
+			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+			break;
+		case INIT_OP_DELAY:
+			/* qed_init_run is always invoked from a
+			 * sleepable context
+			 */
+			udelay(le32_to_cpu(cmd->delay.delay));
+			break;
+
+		case INIT_OP_CALLBACK:
+			qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+			break;
+		}
+
+		if (rc)
+			break;
+	}
+
+	kfree(p_hwfn->unzip_buf);
+	return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+	u32 gtt_base;
+	u32 i;
+
+	/* Set the global windows */
+	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+	for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+		if (pxp_global_win[i])
+			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+			       pxp_global_win[i]);
+}
+
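+/* The firmware data blob begins with an array of bin_buffer_hdr entries,
+ * one per BIN_BUF_* section, each holding that section's offset (and
+ * length) within the blob; the pointers below are derived from it.
+ */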
+int qed_init_fw_data(struct qed_dev *cdev,
+		     const u8 *data)
+{
+	struct qed_fw_data *fw = cdev->fw_data;
+	struct bin_buffer_hdr *buf_hdr;
+	u32 offset, len;
+
+	if (!data) {
+		DP_NOTICE(cdev, "Invalid fw data\n");
+		return -EINVAL;
+	}
+
+	buf_hdr = (struct bin_buffer_hdr *)data;
+
+	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+	fw->init_ops = (union init_op *)(data + offset);
+
+	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+	fw->arr_data = (u32 *)(data + offset);
+
+	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+	fw->modes_tree_buf = (u8 *)(data + offset);
+	len = buf_hdr[BIN_BUF_INIT_CMD].length;
+	fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 0000000..1e83204
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,110 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * @brief qed_init_iro_array - init iro_arr.
+ *
+ *
+ * @param cdev
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * @brief qed_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
 * @return int
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+		 struct qed_ptt *p_ptt,
+		 int phase,
+		 int phase_id,
+		 int modes);
+
+/**
 * @brief qed_init_alloc - Allocate the RT array and store its pointer.
+ *
+ *
+ * @param p_hwfn
+ *
 * @return int
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
 * @brief qed_init_free - Free the RT array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+			   u32 rt_offset,
+			   u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val)	\
+	qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+	qed_init_store_rt_reg(hwfn, offset, val)
+
+/**
 * @brief qed_init_store_rt_agg - Store an array of configuration
 *        values in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ * @param size
+ */
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+			   u32 rt_offset,
+			   u32 *val,
+			   size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+	qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ *      Initialize GTT global windows and set admin window
+ *      related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 0000000..2e399b6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,1134 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+struct qed_pi_info {
+	qed_int_comp_cb_t	comp_cb;
+	void			*cookie;
+};
+
+struct qed_sb_sp_info {
+	struct qed_sb_info	sb_info;
+
+	/* per protocol index data */
+	struct qed_pi_info	pi_info_arr[PIS_PER_SB];
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+	ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE      (0x3ff)
+struct qed_sb_attn_info {
+	/* Virtual & Physical address of the SB */
+	struct atten_status_block       *sb_attn;
+	dma_addr_t		      sb_phys;
+
+	/* Last seen running index */
+	u16			     index;
+
+	/* Previously asserted attentions, which are still unasserted */
+	u16			     known_attn;
+
+	/* Cleanup address for the link's general hw attention */
+	u32			     mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+				      struct qed_sb_attn_info   *p_sb_desc)
+{
+	u16 rc = 0;
+	u16 index;
+
+	/* Make certain HW write took effect */
+	mmiowb();
+
+	index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+	if (p_sb_desc->index != index) {
+		p_sb_desc->index	= index;
+		rc		      = QED_SB_ATT_IDX;
+	}
+
+	/* Make certain we got a consistent view with HW */
+	mmiowb();
+
+	return rc;
+}
+
+/**
+ *  @brief qed_int_assertion - handles asserted attention bits
+ *
+ *  @param p_hwfn
+ *  @param asserted_bits newly asserted bits
+ *  @return int
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn,
+			     u16 asserted_bits)
+{
+	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+	u32 igu_mask;
+
+	/* Mask the source of the attention in the IGU */
+	igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+			  IGU_REG_ATTENTION_ENABLE);
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+		   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+	igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "inner known ATTN state: 0x%04x --> 0x%04x\n",
+		   sb_attn_sw->known_attn,
+		   sb_attn_sw->known_attn | asserted_bits);
+	sb_attn_sw->known_attn |= asserted_bits;
+
+	/* Handle MCP events */
+	if (asserted_bits & 0x100) {
+		qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+		/* Clean the MCP attention */
+		qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+		       sb_attn_sw->mfw_attn_addr, 0);
+	}
+
+	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+		      GTT_BAR0_MAP_REG_IGU_CMD +
+		      ((IGU_CMD_ATTN_BIT_SET_UPPER -
+			IGU_CMD_INT_ACK_BASE) << 3),
+		      (u32)asserted_bits);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+		   asserted_bits);
+
+	return 0;
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return int
+ *
+ */
+static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
+			       u16 deasserted_bits)
+{
+	struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+	u32 aeu_mask;
+
+	if (deasserted_bits != 0x100)
+		DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n");
+
+	/* Clear IGU indication for the deasserted bits */
+	DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+		      GTT_BAR0_MAP_REG_IGU_CMD +
+		      ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+			IGU_CMD_INT_ACK_BASE) << 3),
+		      ~((u32)deasserted_bits));
+
+	/* Unmask deasserted attentions in IGU */
+	aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+			  IGU_REG_ATTENTION_ENABLE);
+	aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+	qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+	/* Clear deassertion from inner state */
+	sb_attn_sw->known_attn &= ~deasserted_bits;
+
+	return 0;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+	struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+	struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+	u32 attn_bits = 0, attn_acks = 0;
+	u16 asserted_bits, deasserted_bits;
+	__le16 index;
+	int rc = 0;
+
+	/* Read current attention bits/acks - safeguard against racing
+	 * attentions by guaranteeing we work on a consistent snapshot
+	 */
+	do {
+		index = p_sb_attn->sb_index;
+		attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+		attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+	} while (index != p_sb_attn->sb_index);
+	p_sb_attn->sb_index = index;
+
+	/* Attention / Deassertion are meaningful (and in correct state)
+	 * only when they differ and consistent with known state - deassertion
+	 * when previous attention & current ack, and assertion when current
+	 * attention with no previous attention
+	 */
+	asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+		~p_sb_attn_sw->known_attn;
+	deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+		p_sb_attn_sw->known_attn;
+
+	if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+		DP_INFO(p_hwfn,
+			"Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+			index, attn_bits, attn_acks, asserted_bits,
+			deasserted_bits, p_sb_attn_sw->known_attn);
+	} else if (asserted_bits == 0x100) {
+		DP_INFO(p_hwfn,
+			"MFW indication via attention\n");
+	} else {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+			   "MFW indication [deassertion]\n");
+	}
+
+	if (asserted_bits) {
+		rc = qed_int_assertion(p_hwfn, asserted_bits);
+		if (rc)
+			return rc;
+	}
+
+	if (deasserted_bits) {
+		rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+			    void __iomem *igu_addr,
+			    u32 ack_cons)
+{
+	struct igu_prod_cons_update igu_ack = { 0 };
+
+	igu_ack.sb_id_and_flags =
+		((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+		 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+		 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+		 (IGU_SEG_ACCESS_ATTN <<
+		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+	DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to same place address;
+	 * Need to guarantee all commands will be received (in-order) by HW.
+	 */
+	mmiowb();
+	barrier();
+}
+
+void qed_int_sp_dpc(unsigned long hwfn_cookie)
+{
+	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+	struct qed_pi_info *pi_info = NULL;
+	struct qed_sb_attn_info *sb_attn;
+	struct qed_sb_info *sb_info;
+	int arr_size;
+	u16 rc = 0;
+
+	if (!p_hwfn) {
+		DP_ERR(p_hwfn->cdev, "DPC called - no hwfn!\n");
+		return;
+	}
+
+	if (!p_hwfn->p_sp_sb) {
+		DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+		return;
+	}
+
+	sb_info = &p_hwfn->p_sp_sb->sb_info;
+	arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+	if (!sb_info) {
+		DP_ERR(p_hwfn->cdev,
+		       "Status block is NULL - cannot ack interrupts\n");
+		return;
+	}
+
+	if (!p_hwfn->p_sb_attn) {
+		DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+		return;
+	}
+	sb_attn = p_hwfn->p_sb_attn;
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+		   p_hwfn, p_hwfn->my_id);
+
+	/* Disable ack for def status block. Required both for msix and
+	 * for inta in non-mask mode; in inta it does no harm.
+	 */
+	qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+	/* Gather Interrupts/Attentions information */
+	if (!sb_info->sb_virt) {
+		DP_ERR(p_hwfn->cdev,
+		       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+	} else {
+		u32 tmp_index = sb_info->sb_ack;
+
+		rc = qed_sb_update_sb_idx(sb_info);
+		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+			   "Interrupt indices: 0x%08x --> 0x%08x\n",
+			   tmp_index, sb_info->sb_ack);
+	}
+
+	if (!sb_attn || !sb_attn->sb_attn) {
+		DP_ERR(p_hwfn->cdev,
+		       "Attentions Status block is NULL - cannot check for new attentions!\n");
+	} else {
+		u16 tmp_index = sb_attn->index;
+
+		rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+		DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+			   "Attention indices: 0x%08x --> 0x%08x\n",
+			   tmp_index, sb_attn->index);
+	}
+
+	/* Check if we expect interrupts at this time. If not, just ack them */
+	if (!(rc & QED_SB_EVENT_MASK)) {
+		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+		return;
+	}
+
+	/* Check the validity of the DPC ptt. If invalid, ack interrupts and fail */
+	if (!p_hwfn->p_dpc_ptt) {
+		DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+		qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+		return;
+	}
+
+	if (rc & QED_SB_ATT_IDX)
+		qed_int_attentions(p_hwfn);
+
+	if (rc & QED_SB_IDX) {
+		int pi;
+
+		/* Invoke the callback registered for each protocol index */
+		for (pi = 0; pi < arr_size; pi++) {
+			pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+			if (pi_info->comp_cb)
+				pi_info->comp_cb(p_hwfn, pi_info->cookie);
+		}
+	}
+
+	if (sb_attn && (rc & QED_SB_ATT_IDX))
+		/* This should be done before the interrupts are enabled,
+		 * since otherwise a new attention will be generated.
+		 */
+		qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_dev *cdev   = p_hwfn->cdev;
+	struct qed_sb_attn_info *p_sb   = p_hwfn->p_sb_attn;
+
+	if (p_sb) {
+		if (p_sb->sb_attn)
+			dma_free_coherent(&cdev->pdev->dev,
+					  SB_ATTN_ALIGNED_SIZE(p_hwfn),
+					  p_sb->sb_attn,
+					  p_sb->sb_phys);
+		kfree(p_sb);
+	}
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt)
+{
+	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+	sb_info->index = 0;
+	sb_info->known_attn = 0;
+
+	/* Configure Attention Status Block in IGU */
+	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt,
+				 void *sb_virt_addr,
+				 dma_addr_t sb_phy_addr)
+{
+	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+	sb_info->sb_attn = sb_virt_addr;
+	sb_info->sb_phys = sb_phy_addr;
+
+	/* Set the address of cleanup for the mcp attention */
+	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+				 MISC_REG_AEU_GENERAL_ATTN_0;
+
+	qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt)
+{
+	struct qed_dev *cdev = p_hwfn->cdev;
+	struct qed_sb_attn_info *p_sb;
+	void *p_virt;
+	dma_addr_t p_phys = 0;
+
+	/* SB struct */
+	p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+	if (!p_sb) {
+		DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n");
+		return -ENOMEM;
+	}
+
+	/* SB ring  */
+	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+				    SB_ATTN_ALIGNED_SIZE(p_hwfn),
+				    &p_phys, GFP_KERNEL);
+
+	if (!p_virt) {
+		DP_NOTICE(cdev, "Failed to allocate status block (attentions)\n");
+		kfree(p_sb);
+		return -ENOMEM;
+	}
+
+	/* Attention setup */
+	p_hwfn->p_sb_attn = p_sb;
+	qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+	return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
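+/* Example (assuming a timer resolution of 1): a requested 24us window is
+ * encoded as timeset = 24 >> (1 + 1) = 6, which the hardware expands back
+ * to 6 << (1 + 1) = 24us.
+ */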
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+			   struct cau_sb_entry *p_sb_entry,
+			   u8 pf_id,
+			   u16 vf_number,
+			   u8 vf_valid)
+{
+	u32 cau_state;
+
+	memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+	/* setting the timer resolution to a fixed value (= 1) */
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+		  QED_CAU_DEF_RX_TIMER_RES);
+	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+		  QED_CAU_DEF_TX_TIMER_RES);
+
+	cau_state = CAU_HC_DISABLE_STATE;
+
+	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+		cau_state = CAU_HC_ENABLE_STATE;
+		if (!p_hwfn->cdev->rx_coalesce_usecs)
+			p_hwfn->cdev->rx_coalesce_usecs =
+				QED_CAU_DEF_RX_USECS;
+		if (!p_hwfn->cdev->tx_coalesce_usecs)
+			p_hwfn->cdev->tx_coalesce_usecs =
+				QED_CAU_DEF_TX_USECS;
+	}
+
+	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
+	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 dma_addr_t sb_phys,
+			 u16 igu_sb_id,
+			 u16 vf_number,
+			 u8 vf_valid)
+{
+	struct cau_sb_entry sb_entry;
+	u32 val;
+
+	qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+			      vf_number, vf_valid);
+
+	if (p_hwfn->hw_init_done) {
+		val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64);
+		qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys));
+		qed_wr(p_hwfn, p_ptt, val + sizeof(u32),
+		       upper_32_bits(sb_phys));
+
+		val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64);
+		qed_wr(p_hwfn, p_ptt, val, sb_entry.data);
+		qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params);
+	} else {
+		/* Initialize Status Block Address */
+		STORE_RT_REG_AGG(p_hwfn,
+				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+				 igu_sb_id * 2,
+				 sb_phys);
+
+		STORE_RT_REG_AGG(p_hwfn,
+				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+				 igu_sb_id * 2,
+				 sb_entry);
+	}
+
+	/* Configure pi coalescing if set */
+	if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+		u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
+			     (QED_CAU_DEF_RX_TIMER_RES + 1);
+		u8 num_tc = 1, i;
+
+		qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+				    QED_COAL_RX_STATE_MACHINE,
+				    timeset);
+
+		timeset = p_hwfn->cdev->tx_coalesce_usecs >>
+			  (QED_CAU_DEF_TX_TIMER_RES + 1);
+
+		for (i = 0; i < num_tc; i++) {
+			qed_int_cau_conf_pi(p_hwfn, p_ptt,
+					    igu_sb_id, TX_PI(i),
+					    QED_COAL_TX_STATE_MACHINE,
+					    timeset);
+		}
+	}
+}
+
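+/* Each status block owns PIS_PER_SB consecutive entries in the CAU PI
+ * memory, so the entry for (igu_sb_id, pi_index) lives at offset
+ * igu_sb_id * PIS_PER_SB + pi_index.
+ */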
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 u16 igu_sb_id,
+			 u32 pi_index,
+			 enum qed_coalescing_fsm coalescing_fsm,
+			 u8 timeset)
+{
+	struct cau_pi_entry pi_entry;
+	u32 sb_offset;
+	u32 pi_offset;
+
+	sb_offset = igu_sb_id * PIS_PER_SB;
+	memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+	if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+	else
+		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+	pi_offset = sb_offset + pi_index;
+	if (p_hwfn->hw_init_done) {
+		qed_wr(p_hwfn, p_ptt,
+		       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+		       *((u32 *)&(pi_entry)));
+	} else {
+		STORE_RT_REG(p_hwfn,
+			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+			     *((u32 *)&(pi_entry)));
+	}
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      struct qed_sb_info *sb_info)
+{
+	/* zero status block and ack counter */
+	sb_info->sb_ack = 0;
+	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+	qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+			    sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief qed_get_igu_sb_id - given a sw sb_id return the
+ *        igu_sb_id
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
+			     u16 sb_id)
+{
+	u16 igu_sb_id;
+
+	/* Assuming a contiguous set of IGU SBs dedicated to the given PF */
+	if (sb_id == QED_SP_SB_ID)
+		igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+	else
+		igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
+		   (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
+
+	return igu_sb_id;
+}
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    struct qed_sb_info *sb_info,
+		    void *sb_virt_addr,
+		    dma_addr_t sb_phy_addr,
+		    u16 sb_id)
+{
+	sb_info->sb_virt = sb_virt_addr;
+	sb_info->sb_phys = sb_phy_addr;
+
+	sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+	if (sb_id != QED_SP_SB_ID) {
+		p_hwfn->sbs_info[sb_id] = sb_info;
+		p_hwfn->num_sbs++;
+	}
+
+	sb_info->cdev = p_hwfn->cdev;
+
+	/* The igu address holds the absolute address that must be
+	 * written to in order to ack this specific status block
+	 */
+	sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+					  GTT_BAR0_MAP_REG_IGU_CMD +
+					  (sb_info->igu_sb_id << 3);
+
+	sb_info->flags |= QED_SB_INFO_INIT;
+
+	qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+	return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+		       struct qed_sb_info *sb_info,
+		       u16 sb_id)
+{
+	if (sb_id == QED_SP_SB_ID) {
+		DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+		return -EINVAL;
+	}
+
+	/* zero status block and ack counter */
+	sb_info->sb_ack = 0;
+	memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+	p_hwfn->sbs_info[sb_id] = NULL;
+	p_hwfn->num_sbs--;
+
+	return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+	if (p_sb) {
+		if (p_sb->sb_info.sb_virt)
+			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+					  SB_ALIGNED_SIZE(p_hwfn),
+					  p_sb->sb_info.sb_virt,
+					  p_sb->sb_info.sb_phys);
+		kfree(p_sb);
+	}
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt)
+{
+	struct qed_sb_sp_info *p_sb;
+	dma_addr_t p_phys = 0;
+	void *p_virt;
+
+	/* SB struct */
+	p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC);
+	if (!p_sb) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n");
+		return -ENOMEM;
+	}
+
+	/* SB ring  */
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    SB_ALIGNED_SIZE(p_hwfn),
+				    &p_phys, GFP_KERNEL);
+	if (!p_virt) {
+		DP_NOTICE(p_hwfn, "Failed to allocate status block\n");
+		kfree(p_sb);
+		return -ENOMEM;
+	}
+
+	/* Status Block setup */
+	p_hwfn->p_sp_sb = p_sb;
+	qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+			p_phys, QED_SP_SB_ID);
+
+	memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+	return 0;
+}
+
+static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_ptt)
+{
+	if (!p_hwfn)
+		return;
+
+	if (p_hwfn->p_sp_sb)
+		qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+	else
+		DP_NOTICE(p_hwfn->cdev,
+			  "Failed to setup Slow path status block - NULL pointer\n");
+
+	if (p_hwfn->p_sb_attn)
+		qed_int_sb_attn_setup(p_hwfn, p_ptt);
+	else
+		DP_NOTICE(p_hwfn->cdev,
+			  "Failed to setup attentions status block - NULL pointer\n");
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+			qed_int_comp_cb_t comp_cb,
+			void *cookie,
+			u8 *sb_idx,
+			__le16 **p_fw_cons)
+{
+	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+	int qed_status = -ENOMEM;
+	u8 pi;
+
+	/* Look for a free index */
+	for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+		if (!p_sp_sb->pi_info_arr[pi].comp_cb) {
+			p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+			p_sp_sb->pi_info_arr[pi].cookie = cookie;
+			*sb_idx = pi;
+			*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+			qed_status = 0;
+			break;
+		}
+	}
+
+	return qed_status;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+	struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+	int qed_status = -ENOMEM;
+
+	if (p_sp_sb->pi_info_arr[pi].comp_cb) {
+		p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+		p_sp_sb->pi_info_arr[pi].cookie = NULL;
+		qed_status = 0;
+	}
+
+	return qed_status;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+	return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    enum qed_int_mode int_mode)
+{
+	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+	p_hwfn->cdev->int_mode = int_mode;
+	switch (p_hwfn->cdev->int_mode) {
+	case QED_INT_MODE_INTA:
+		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+		break;
+
+	case QED_INT_MODE_MSI:
+		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+		break;
+
+	case QED_INT_MODE_MSIX:
+		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+		break;
+	case QED_INT_MODE_POLL:
+		break;
+	}
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			enum qed_int_mode int_mode)
+{
+	int i;
+
+	p_hwfn->b_int_enabled = 1;
+
+	/* Mask non-link attentions */
+	for (i = 0; i < 9; i++)
+		qed_wr(p_hwfn, p_ptt,
+		       MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
+
+	/* Enable interrupt Generation */
+	qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+	/* Configure AEU signal change to produce attentions for link */
+	qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+	qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+
+	/* Flush the writes to IGU */
+	mmiowb();
+
+	/* Unmask AEU signals toward IGU */
+	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt)
+{
+	p_hwfn->b_int_enabled = 0;
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    u32 sb_id,
+			    bool cleanup_set,
+			    u16 opaque_fid)
+{
+	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+	u32 data = 0;
+	u32 cmd_ctrl = 0;
+	u32 val = 0;
+	u32 sb_bit = 0;
+	u32 sb_bit_addr = 0;
+
+	/* Set the data field */
+	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+	/* Set the control register */
+	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+	barrier();
+
+	qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+	/* Flush the write to IGU */
+	mmiowb();
+
+	/* calculate where to read the status bit from */
+	sb_bit = 1 << (sb_id % 32);
+	sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+	/* Now wait for the command to complete */
+	do {
+		val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+			break;
+
+		usleep_range(5000, 10000);
+	} while (--sleep_cnt);
+
+	if (!sleep_cnt)
+		DP_NOTICE(p_hwfn,
+			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+			  val, sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     u32 sb_id,
+				     u16 opaque,
+				     bool b_set)
+{
+	int pi;
+
+	/* Set */
+	if (b_set)
+		qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+
+	/* Clear */
+	qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+	/* Clear the CAU for the SB */
+	for (pi = 0; pi < 12; pi++)
+		qed_wr(p_hwfn, p_ptt,
+		       CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      bool b_set,
+			      bool b_slowpath)
+{
+	u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+	u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+	u32 sb_id = 0;
+	u32 val = 0;
+
+	val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+	qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "IGU cleaning SBs [%d,...,%d]\n",
+		   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+	for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
+		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+						p_hwfn->hw_info.opaque_fid,
+						b_set);
+
+	if (b_slowpath) {
+		sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+			   "IGU cleaning slowpath SB [%d]\n", sb_id);
+		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+						p_hwfn->hw_info.opaque_fid,
+						b_set);
+	}
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt)
+{
+	struct qed_igu_info *p_igu_info;
+	struct qed_igu_block *blk;
+	u32 val;
+	u16 sb_id;
+	u16 prev_sb_id = 0xFF;
+
+	p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC);
+
+	if (!p_hwfn->hw_info.p_igu_info)
+		return -ENOMEM;
+
+	p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+	/* Initialize base sb / sb cnt for PFs */
+	p_igu_info->igu_base_sb		= 0xffff;
+	p_igu_info->igu_sb_cnt		= 0;
+	p_igu_info->igu_dsb_id		= 0xffff;
+	p_igu_info->igu_base_sb_iov	= 0xffff;
+
+	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+	     sb_id++) {
+		blk = &p_igu_info->igu_map.igu_blocks[sb_id];
+
+		val = qed_rd(p_hwfn, p_ptt,
+			     IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+
+		/* stop scanning when we hit the first invalid PF entry */
+		if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+		    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+			break;
+
+		blk->status = QED_IGU_STATUS_VALID;
+		blk->function_id = GET_FIELD(val,
+					     IGU_MAPPING_LINE_FUNCTION_NUMBER);
+		blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+		blk->vector_number = GET_FIELD(val,
+					       IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+			   "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n",
+			   val, blk->function_id, blk->is_pf,
+			   blk->vector_number);
+
+		if (blk->is_pf) {
+			if (blk->function_id == p_hwfn->rel_pf_id) {
+				blk->status |= QED_IGU_STATUS_PF;
+
+				if (blk->vector_number == 0) {
+					if (p_igu_info->igu_dsb_id == 0xffff)
+						p_igu_info->igu_dsb_id = sb_id;
+				} else {
+					if (p_igu_info->igu_base_sb ==
+					    0xffff) {
+						p_igu_info->igu_base_sb = sb_id;
+					} else if (prev_sb_id != sb_id - 1) {
+						DP_NOTICE(p_hwfn->cdev,
+							  "consecutive igu vectors for HWFN %x broken",
+							  p_hwfn->rel_pf_id);
+						break;
+					}
+					prev_sb_id = sb_id;
+					/* we don't count the default */
+					(p_igu_info->igu_sb_cnt)++;
+				}
+			}
+		}
+	}
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+		   "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+		   p_igu_info->igu_base_sb,
+		   p_igu_info->igu_sb_cnt,
+		   p_igu_info->igu_dsb_id);
+
+	if (p_igu_info->igu_base_sb == 0xffff ||
+	    p_igu_info->igu_dsb_id == 0xffff ||
+	    p_igu_info->igu_sb_cnt == 0) {
+		DP_NOTICE(p_hwfn,
+			  "IGU CAM returned invalid values igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+			   p_igu_info->igu_base_sb,
+			   p_igu_info->igu_sb_cnt,
+			   p_igu_info->igu_dsb_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+	u32 igu_pf_conf = 0;
+
+	igu_pf_conf |= IGU_PF_CONF_FUNC_EN;
+
+	STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+	u64 intr_status = 0;
+	u32 intr_status_lo = 0;
+	u32 intr_status_hi = 0;
+	u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+			       IGU_CMD_INT_ACK_BASE;
+	u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+			       IGU_CMD_INT_ACK_BASE;
+
+	intr_status_lo = REG_RD(p_hwfn,
+				GTT_BAR0_MAP_REG_IGU_CMD +
+				lsb_igu_cmd_addr * 8);
+	intr_status_hi = REG_RD(p_hwfn,
+				GTT_BAR0_MAP_REG_IGU_CMD +
+				msb_igu_cmd_addr * 8);
+	intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+	return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+	tasklet_init(p_hwfn->sp_dpc,
+		     qed_int_sp_dpc, (unsigned long)p_hwfn);
+	p_hwfn->b_sp_dpc_enabled = true;
+}
+
+static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
+{
+	p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC);
+	if (!p_hwfn->sp_dpc)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
+{
+	kfree(p_hwfn->sp_dpc);
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt)
+{
+	int rc = 0;
+
+	rc = qed_int_sp_dpc_alloc(p_hwfn);
+	if (rc) {
+		DP_ERR(p_hwfn->cdev, "Failed to allocate sp dpc mem\n");
+		return rc;
+	}
+	rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+	if (rc) {
+		DP_ERR(p_hwfn->cdev, "Failed to allocate sp sb mem\n");
+		return rc;
+	}
+	rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+	if (rc) {
+		DP_ERR(p_hwfn->cdev, "Failed to allocate sb attn mem\n");
+		return rc;
+	}
+	return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+	qed_int_sp_sb_free(p_hwfn);
+	qed_int_sb_attn_free(p_hwfn);
+	qed_int_sp_dpc_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt)
+{
+	qed_int_sp_sb_setup(p_hwfn, p_ptt);
+	qed_int_sp_dpc_setup(p_hwfn);
+}
+
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+			int *p_iov_blks)
+{
+	struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+	if (!info)
+		return 0;
+
+	if (p_iov_blks)
+		*p_iov_blks = info->free_blks;
+
+	return info->igu_sb_cnt;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 0000000..16b5751
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,391 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN       (0x1 << 0)    /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1 << 1)    /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1 << 2)    /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1 << 3)    /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)    /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1 << 5)    /* simd all ones mode     */
+
+/* IGU control commands */
+enum igu_ctrl_cmd {
+	IGU_CTRL_CMD_TYPE_RD,
+	IGU_CTRL_CMD_TYPE_WR,
+	MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+	u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK           0xFFFF  /* Opaque_FID	 */
+#define IGU_CTRL_REG_FID_SHIFT          0
+#define IGU_CTRL_REG_PXP_ADDR_MASK      0xFFF   /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT     16
+#define IGU_CTRL_REG_RESERVED_MASK      0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT     28
+#define IGU_CTRL_REG_TYPE_MASK          0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT         31
+};
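+
+/* Illustrative sketch (not part of this patch) of how the mask/shift
+ * pairs above compose a control word through the driver's SET_FIELD
+ * macro; the fid and address values below are made up:
+ *
+ *	struct igu_ctrl_reg cmd_ctrl = { 0 };
+ *
+ *	SET_FIELD(cmd_ctrl.ctrl_data, IGU_CTRL_REG_FID, 0x10);
+ *	SET_FIELD(cmd_ctrl.ctrl_data, IGU_CTRL_REG_PXP_ADDR, 0x123);
+ *	SET_FIELD(cmd_ctrl.ctrl_data, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+ */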
+
+enum qed_coalescing_fsm {
+	QED_COAL_RX_STATE_MACHINE,
+	QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief qed_int_cau_conf_pi - configure cau for a given
+ *        status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param coalescing_fsm
+ * @param timeset
+ */
+void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 u16 igu_sb_id,
+			 u32 pi_index,
+			 enum qed_coalescing_fsm coalescing_fsm,
+			 u8 timeset);
+
+/**
+ * @brief qed_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    enum qed_int_mode int_mode);
+
+/**
+ * @brief qed_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+			     struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_igu_read_sisr_reg - Reads the single ISR / multiple DPC
+ *        register from the IGU.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * @brief qed_int_sb_init - Initializes the sb_info structure.
+ *
+ * Once initialized, the structure can be passed to sb-related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info	points to an uninitialized (but
+ *			allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id	the sb_id to be used (zero based in driver)
+ *			should use QED_SP_SB_ID for SP Status block
+ *
+ * @return int
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
+		    struct qed_sb_info *sb_info,
+		    void *sb_virt_addr,
+		    dma_addr_t sb_phy_addr,
+		    u16 sb_id);
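+
+/* Usage sketch (illustrative, not from this patch): a caller would
+ * typically carve the status block out of coherent DMA memory and hand
+ * it to qed_int_sb_init(); p_hwfn, p_ptt and cdev are assumed to be the
+ * caller's valid hwfn, ptt and device:
+ *
+ *	struct qed_sb_info *sb_info = kzalloc(sizeof(*sb_info), GFP_KERNEL);
+ *	dma_addr_t sb_phys;
+ *	void *sb_virt;
+ *
+ *	sb_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ *				     sizeof(struct status_block),
+ *				     &sb_phys, GFP_KERNEL);
+ *	if (sb_info && sb_virt)
+ *		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info,
+ *				     sb_virt, sb_phys, QED_SP_SB_ID);
+ */
+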
+/**
+ * @brief qed_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info	initialized sb_info structure
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+		      struct qed_ptt *p_ptt,
+		      struct qed_sb_info *sb_info);
+
+/**
+ * @brief qed_int_sb_release - releases the sb_info structure.
+ *
+ * Once the structure is released, its memory can be freed.
+ *
+ * @param p_hwfn
+ * @param sb_info	points to an allocated sb_info structure
+ * @param sb_id		the sb_id to be used (zero based in driver)
+ *			should never be equal to QED_SP_SB_ID
+ *			(SP Status block)
+ *
+ * @return int
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+		       struct qed_sb_info *sb_info,
+		       u16 sb_id);
+
+/**
+ * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param hwfn_cookie - pointer to the hwfn, cast to an unsigned long
+ *
+ */
+void qed_int_sp_dpc(unsigned long hwfn_cookie);
+
+/**
+ * @brief qed_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_iov_blks - configured free blks for vfs
+ *
+ * @return int - number of status blocks configured
+ */
+int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+			int *p_iov_blks);
+
+/**
+ * @file
+ *
+ * @brief Interrupt handler
+ */
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX  0x0001
+#define QED_SB_EVENT_MASK       0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn)	\
+	ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+struct qed_igu_block {
+	u8	status;
+#define QED_IGU_STATUS_FREE     0x01
+#define QED_IGU_STATUS_VALID    0x02
+#define QED_IGU_STATUS_PF       0x04
+
+	u8	vector_number;
+	u8	function_id;
+	u8	is_pf;
+};
+
+struct qed_igu_map {
+	struct qed_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+struct qed_igu_info {
+	struct qed_igu_map	igu_map;
+	u16			igu_dsb_id;
+	u16			igu_base_sb;
+	u16			igu_base_sb_iov;
+	u16			igu_sb_cnt;
+	u16			igu_sb_cnt_iov;
+	u16			free_blks;
+};
+
+/* TODO: names of these functions may change... */
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      bool b_set,
+			      bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ *	This function needs to be called during hardware
+ *	prepare. It reads the info from igu cam to know which
+ *	status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+				 void *cookie);
+/**
+ * @brief qed_int_register_cb - Register a callback function for the
+ *      slowhwfn status block.
+ *
+ *	Every protocol that uses the slowhwfn status block
+ *	should register a callback function that will be called
+ *	once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ *                  interrupt on the sp sb
+ *
+ * @param cookie  - passed to the callback function
+ * @param sb_idx  - OUT parameter which gives the chosen index
+ *                  for this protocol.
+ * @param p_fw_cons  - pointer to the actual address of the
+ *                     consumer for this protocol.
+ *
+ * @return int
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+			qed_int_comp_cb_t comp_cb,
+			void *cookie,
+			u8 *sb_idx,
+			__le16 **p_fw_cons);
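+
+/* Registration sketch (illustrative; my_proto_cb and my_cookie are
+ * hypothetical names, not from this patch):
+ *
+ *	static int my_proto_cb(struct qed_hwfn *p_hwfn, void *cookie)
+ *	{
+ *		... react to the sp status block update ...
+ *		return 0;
+ *	}
+ *
+ *	u8 sb_idx;
+ *	__le16 *p_fw_cons;
+ *
+ *	rc = qed_int_register_cb(p_hwfn, my_proto_cb, my_cookie,
+ *				 &sb_idx, &p_fw_cons);
+ */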
+
+/**
+ * @brief qed_int_unregister_cb - Unregisters callback
+ *      function from sp sb.
+ *      Partner of qed_int_register_cb -> should be called
+ *      when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return int
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+			  u8 pi);
+
+/**
+ * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id		- igu status block id
+ * @param cleanup_set	- set(1) / clear(0)
+ * @param opaque_fid    - the function for which to perform
+ *			cleanup, for example a PF on behalf of
+ *			its VFs.
+ */
+void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+			    struct qed_ptt *p_ptt,
+			    u32 sb_id,
+			    bool cleanup_set,
+			    u16 opaque_fid);
+
+/**
+ * @brief Initialize the pure runtime registers of a single status
+ *        block. Should be called for each status block that will be
+ *        used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id		- igu status block id
+ * @param opaque	- opaque fid of the sb owner.
+ * @param b_set		- set(1) / clear(0)
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     u32 sb_id,
+				     u16 opaque,
+				     bool b_set);
+
+/**
+ * @brief qed_int_cau_conf_sb - configure cau for a given status
+ *        block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 dma_addr_t sb_phys,
+			 u16 igu_sb_id,
+			 u16 vf_number,
+			 u8 vf_valid);
+
+/**
+ * @brief qed_int_alloc
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_int_free
+ *
+ * @param p_hwfn
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_int_setup
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ */
+void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt,
+			enum qed_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+			   struct cau_sb_entry *p_sb_entry,
+			   u8 pf_id,
+			   u16 vf_number,
+			   u8 vf_valid);
+
+#define QED_MAPPING_MEMORY_SIZE(dev)	(NUM_OF_SBS(dev))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 0000000..f72036a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,1704 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+enum qed_rss_caps {
+	QED_RSS_IPV4		= 0x1,
+	QED_RSS_IPV6		= 0x2,
+	QED_RSS_IPV4_TCP	= 0x4,
+	QED_RSS_IPV6_TCP	= 0x8,
+	QED_RSS_IPV4_UDP	= 0x10,
+	QED_RSS_IPV6_UDP	= 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define QED_RSS_IND_TABLE_SIZE 128
+#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
+
+struct qed_rss_params {
+	u8	update_rss_config;
+	u8	rss_enable;
+	u8	rss_eng_id;
+	u8	update_rss_capabilities;
+	u8	update_rss_ind_table;
+	u8	update_rss_key;
+	u8	rss_caps;
+	u8	rss_table_size_log;
+	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+	u32	rss_key[QED_RSS_KEY_SIZE];
+};
+
+enum qed_filter_opcode {
+	QED_FILTER_ADD,
+	QED_FILTER_REMOVE,
+	QED_FILTER_MOVE,
+	QED_FILTER_REPLACE,     /* Delete all MACs and add new one instead */
+	QED_FILTER_FLUSH,       /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+	QED_FILTER_MAC,
+	QED_FILTER_VLAN,
+	QED_FILTER_MAC_VLAN,
+	QED_FILTER_INNER_MAC,
+	QED_FILTER_INNER_VLAN,
+	QED_FILTER_INNER_PAIR,
+	QED_FILTER_INNER_MAC_VNI_PAIR,
+	QED_FILTER_MAC_VNI_PAIR,
+	QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+	enum qed_filter_opcode		opcode;
+	enum qed_filter_ucast_type	type;
+	u8				is_rx_filter;
+	u8				is_tx_filter;
+	u8				vport_to_add_to;
+	u8				vport_to_remove_from;
+	unsigned char			mac[ETH_ALEN];
+	u8				assert_on_error;
+	u16				vlan;
+	u32				vni;
+};
+
+struct qed_filter_mcast {
+	/* MOVE is not supported for multicast */
+	enum qed_filter_opcode	opcode;
+	u8			vport_to_add_to;
+	u8			vport_to_remove_from;
+	u8			num_mc_addrs;
+#define QED_MAX_MC_ADDRS        64
+	unsigned char		mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+struct qed_filter_accept_flags {
+	u8	update_rx_mode_config;
+	u8	update_tx_mode_config;
+	u8	rx_accept_filter;
+	u8	tx_accept_filter;
+#define QED_ACCEPT_NONE         0x01
+#define QED_ACCEPT_UCAST_MATCHED        0x02
+#define QED_ACCEPT_UCAST_UNMATCHED      0x04
+#define QED_ACCEPT_MCAST_MATCHED        0x08
+#define QED_ACCEPT_MCAST_UNMATCHED      0x10
+#define QED_ACCEPT_BCAST                0x20
+};
+
+struct qed_sp_vport_update_params {
+	u16				opaque_fid;
+	u8				vport_id;
+	u8				update_vport_active_rx_flg;
+	u8				vport_active_rx_flg;
+	u8				update_vport_active_tx_flg;
+	u8				vport_active_tx_flg;
+	u8				update_approx_mcast_flg;
+	unsigned long			bins[8];
+	struct qed_rss_params		*rss_params;
+	struct qed_filter_accept_flags	accept_flags;
+};
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41	/* CRC-32C (Castagnoli) polynomial */
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+			      u32 concrete_fid,
+			      u16 opaque_fid,
+			      u8 vport_id,
+			      u16 mtu,
+			      u8 drop_ttl0_flg,
+			      u8 inner_vlan_removal_en_flg)
+{
+	struct qed_sp_init_request_params params;
+	struct vport_start_ramrod_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent =  NULL;
+	int rc = -EINVAL;
+	u16 rx_mode = 0;
+	u8 abs_vport_id = 0;
+
+	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc != 0)
+		return rc;
+
+	memset(&params, 0, sizeof(params));
+	params.ramrod_data_size = sizeof(*p_ramrod);
+	params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 opaque_fid,
+				 ETH_RAMROD_VPORT_START,
+				 PROTOCOLID_ETH,
+				 &params);
+	if (rc)
+		return rc;
+
+	p_ramrod		= &p_ent->ramrod.vport_start;
+	p_ramrod->vport_id	= abs_vport_id;
+
+	p_ramrod->mtu			= cpu_to_le16(mtu);
+	p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
+	p_ramrod->drop_ttl0_en		= drop_ttl0_flg;
+
+	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+	/* TPA related fields */
+	memset(&p_ramrod->tpa_param, 0,
+	       sizeof(struct eth_vport_tpa_param));
+
+	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+						  concrete_fid);
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+			struct vport_update_ramrod_data *p_ramrod,
+			struct qed_rss_params *p_params)
+{
+	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
+	u16 abs_l2_queue = 0, capabilities = 0;
+	int rc = 0, i;
+
+	if (!p_params) {
+		p_ramrod->common.update_rss_flg = 0;
+		return rc;
+	}
+
+	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
+		     ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+	if (rc)
+		return rc;
+
+	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
+	rss->update_rss_capabilities = p_params->update_rss_capabilities;
+	rss->update_rss_ind_table = p_params->update_rss_ind_table;
+	rss->update_rss_key = p_params->update_rss_key;
+
+	rss->rss_mode = p_params->rss_enable ?
+			ETH_VPORT_RSS_MODE_REGULAR :
+			ETH_VPORT_RSS_MODE_DISABLED;
+
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV4));
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV6));
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+	SET_FIELD(capabilities,
+		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
+	rss->tbl_size = p_params->rss_table_size_log;
+
+	rss->capabilities = cpu_to_le16(capabilities);
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+		   p_ramrod->common.update_rss_flg,
+		   rss->rss_mode, rss->update_rss_capabilities,
+		   capabilities, rss->update_rss_ind_table,
+		   rss->update_rss_key);
+
+	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+		rc = qed_fw_l2_queue(p_hwfn,
+				     (u8)p_params->rss_ind_table[i],
+				     &abs_l2_queue);
+		if (rc)
+			return rc;
+
+		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
+		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
+			   i, rss->indirection_table[i]);
+	}
+
+	for (i = 0; i < QED_RSS_KEY_SIZE; i++)
+		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+
+	return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+			  struct vport_update_ramrod_data *p_ramrod,
+			  struct qed_filter_accept_flags accept_flags)
+{
+	p_ramrod->common.update_rx_mode_flg =
+		accept_flags.update_rx_mode_config;
+
+	p_ramrod->common.update_tx_mode_flg =
+		accept_flags.update_tx_mode_config;
+
+	/* Set Rx mode accept flags */
+	if (p_ramrod->common.update_rx_mode_flg) {
+		u8 accept_filter = accept_flags.rx_accept_filter;
+		u16 state = 0;
+
+		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+			  !!(accept_filter & QED_ACCEPT_BCAST));
+
+		p_ramrod->rx_mode.state = cpu_to_le16(state);
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "p_ramrod->rx_mode.state = 0x%x\n", state);
+	}
+
+	/* Set Tx mode accept flags */
+	if (p_ramrod->common.update_tx_mode_flg) {
+		u8 accept_filter = accept_flags.tx_accept_filter;
+		u16 state = 0;
+
+		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+			  !!(accept_filter & QED_ACCEPT_NONE));
+
+		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+			  !!(accept_filter & QED_ACCEPT_NONE));
+
+		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+			  !!(accept_filter & QED_ACCEPT_BCAST));
+
+		p_ramrod->tx_mode.state = cpu_to_le16(state);
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "p_ramrod->tx_mode.state = 0x%x\n", state);
+	}
+}
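+
+/* Illustrative mapping: rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ * QED_ACCEPT_BCAST clears UCAST_DROP_ALL, leaves UCAST_ACCEPT_UNMATCHED
+ * clear, keeps MCAST_DROP_ALL set and sets BCAST_ACCEPT_ALL.
+ */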
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+			struct vport_update_ramrod_data *p_ramrod,
+			struct qed_sp_vport_update_params *p_params)
+{
+	int i;
+
+	memset(&p_ramrod->approx_mcast.bins, 0,
+	       sizeof(p_ramrod->approx_mcast.bins));
+
+	if (p_params->update_approx_mcast_flg) {
+		p_ramrod->common.update_approx_mcast_flg = 1;
+		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+			u32 *p_bins = (u32 *)p_params->bins;
+			__le32 val = cpu_to_le32(p_bins[i]);
+
+			p_ramrod->approx_mcast.bins[i] = val;
+		}
+	}
+}
+
+static int
+qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+		    struct qed_sp_vport_update_params *p_params,
+		    enum spq_mode comp_mode,
+		    struct qed_spq_comp_cb *p_comp_data)
+{
+	struct qed_rss_params *p_rss_params = p_params->rss_params;
+	struct vport_update_ramrod_data_cmn *p_cmn;
+	struct qed_sp_init_request_params sp_params;
+	struct vport_update_ramrod_data *p_ramrod = NULL;
+	struct qed_spq_entry *p_ent = NULL;
+	u8 abs_vport_id = 0;
+	int rc = -EINVAL;
+
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+	if (rc != 0)
+		return rc;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+	sp_params.comp_mode = comp_mode;
+	sp_params.p_comp_data = p_comp_data;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 p_params->opaque_fid,
+				 ETH_RAMROD_VPORT_UPDATE,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	/* Copy input params to ramrod according to FW struct */
+	p_ramrod = &p_ent->ramrod.vport_update;
+	p_cmn = &p_ramrod->common;
+
+	p_cmn->vport_id = abs_vport_id;
+	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+
+	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+	if (rc) {
+		/* Return spq entry which is taken in qed_sp_init_request()*/
+		qed_spq_return_entry(p_hwfn, p_ent);
+		return rc;
+	}
+
+	/* Update mcast bins for VFs, PF doesn't use this functionality */
+	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
+			     u16 opaque_fid,
+			     u8 vport_id)
+{
+	struct qed_sp_init_request_params sp_params;
+	struct vport_stop_ramrod_data *p_ramrod;
+	struct qed_spq_entry *p_ent;
+	u8 abs_vport_id = 0;
+	int rc;
+
+	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+	if (rc != 0)
+		return rc;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 opaque_fid,
+				 ETH_RAMROD_VPORT_STOP,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.vport_stop;
+	p_ramrod->vport_id = abs_vport_id;
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+				 u8 vport,
+				 struct qed_filter_accept_flags accept_flags,
+				 enum spq_mode comp_mode,
+				 struct qed_spq_comp_cb *p_comp_data)
+{
+	struct qed_sp_vport_update_params vport_update_params;
+	int i, rc;
+
+	/* Prepare and send the vport rx_mode change */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = vport;
+	vport_update_params.accept_flags = accept_flags;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+					 comp_mode, p_comp_data);
+		if (rc != 0) {
+			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+			return rc;
+		}
+
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+			   accept_flags.rx_accept_filter,
+			   accept_flags.tx_accept_filter);
+	}
+
+	return 0;
+}
+
+static int qed_sp_release_queue_cid(struct qed_hwfn *p_hwfn,
+				    struct qed_hw_cid_data *p_cid_data)
+{
+	if (!p_cid_data->b_cid_allocated)
+		return 0;
+
+	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
+
+	p_cid_data->b_cid_allocated = false;
+
+	return 0;
+}
+
+static int
+qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+			    u16 opaque_fid,
+			    u32 cid,
+			    struct qed_queue_start_common_params *params,
+			    u8 stats_id,
+			    u16 bd_max_bytes,
+			    dma_addr_t bd_chain_phys_addr,
+			    dma_addr_t cqe_pbl_addr,
+			    u16 cqe_pbl_size)
+{
+	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+	struct qed_sp_init_request_params sp_params;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_hw_cid_data *p_rx_cid;
+	u16 abs_rx_q_id = 0;
+	u8 abs_vport_id = 0;
+	int rc = -EINVAL;
+
+	/* Store information for the stop */
+	p_rx_cid		= &p_hwfn->p_rx_cids[params->queue_id];
+	p_rx_cid->cid		= cid;
+	p_rx_cid->opaque_fid	= opaque_fid;
+	p_rx_cid->vport_id	= params->vport_id;
+
+	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
+	if (rc != 0)
+		return rc;
+
+	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
+	if (rc != 0)
+		return rc;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   opaque_fid, cid, params->queue_id, params->vport_id,
+		   params->sb);
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 cid, opaque_fid,
+				 ETH_RAMROD_RX_QUEUE_START,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+	p_ramrod->sb_id			= cpu_to_le16(params->sb);
+	p_ramrod->sb_index		= params->sb_idx;
+	p_ramrod->vport_id		= abs_vport_id;
+	p_ramrod->stats_counter_id	= stats_id;
+	p_ramrod->rx_queue_id		= cpu_to_le16(abs_rx_q_id);
+	p_ramrod->complete_cqe_flg	= 0;
+	p_ramrod->complete_event_flg	= 1;
+
+	p_ramrod->bd_max_bytes	= cpu_to_le16(bd_max_bytes);
+	p_ramrod->bd_base.hi	= DMA_HI_LE(bd_chain_phys_addr);
+	p_ramrod->bd_base.lo	= DMA_LO_LE(bd_chain_phys_addr);
+
+	p_ramrod->num_of_pbl_pages	= cpu_to_le16(cqe_pbl_size);
+	p_ramrod->cqe_pbl_addr.hi	= DMA_HI_LE(cqe_pbl_addr);
+	p_ramrod->cqe_pbl_addr.lo	= DMA_LO_LE(cqe_pbl_addr);
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+	return rc;
+}
+
+static int
+qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct qed_queue_start_common_params *params,
+			  u16 bd_max_bytes,
+			  dma_addr_t bd_chain_phys_addr,
+			  dma_addr_t cqe_pbl_addr,
+			  u16 cqe_pbl_size,
+			  void __iomem **pp_prod)
+{
+	struct qed_hw_cid_data *p_rx_cid;
+	u64 init_prod_val = 0;
+	u16 abs_l2_queue = 0;
+	u8 abs_stats_id = 0;
+	int rc;
+
+	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
+	if (rc != 0)
+		return rc;
+
+	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
+	if (rc != 0)
+		return rc;
+
+	*pp_prod = (u8 __iomem *)p_hwfn->regview +
+				 GTT_BAR0_MAP_REG_MSDM_RAM +
+				 MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+			  (u32 *)(&init_prod_val));
+
+	/* Allocate a CID for the queue */
+	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
+	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+				 &p_rx_cid->cid);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+		return rc;
+	}
+	p_rx_cid->b_cid_allocated = true;
+
+	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
+					 opaque_fid,
+					 p_rx_cid->cid,
+					 params,
+					 abs_stats_id,
+					 bd_max_bytes,
+					 bd_chain_phys_addr,
+					 cqe_pbl_addr,
+					 cqe_pbl_size);
+
+	if (rc != 0)
+		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+	return rc;
+}
+
+static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+				    u16 rx_queue_id,
+				    bool eq_completion_only,
+				    bool cqe_completion)
+{
+	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+	struct qed_sp_init_request_params sp_params;
+	struct qed_spq_entry *p_ent = NULL;
+	u16 abs_rx_q_id = 0;
+	int rc = -EINVAL;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 p_rx_cid->cid,
+				 p_rx_cid->opaque_fid,
+				 ETH_RAMROD_RX_QUEUE_STOP,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.rx_queue_stop;
+
+	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+
+	/* Cleaning the queue requires the completion to arrive there.
+	 * In addition, VFs require the answer to come as eqe to PF.
+	 */
+	p_ramrod->complete_cqe_flg =
+		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
+		 !eq_completion_only) || cqe_completion;
+	p_ramrod->complete_event_flg =
+		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
+		eq_completion_only;
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
+}
+
+static int
+qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
+			    u16  opaque_fid,
+			    u32  cid,
+			    struct qed_queue_start_common_params *p_params,
+			    u8  stats_id,
+			    dma_addr_t pbl_addr,
+			    u16 pbl_size,
+			    union qed_qm_pq_params *p_pq_params)
+{
+	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+	struct qed_sp_init_request_params sp_params;
+	struct qed_spq_entry *p_ent = NULL;
+	struct qed_hw_cid_data *p_tx_cid;
+	u8 abs_vport_id;
+	int rc = -EINVAL;
+	u16 pq_id;
+
+	/* Store information for the stop */
+	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+	p_tx_cid->cid		= cid;
+	p_tx_cid->opaque_fid	= opaque_fid;
+
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+	if (rc)
+		return rc;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent, cid,
+				 opaque_fid,
+				 ETH_RAMROD_TX_QUEUE_START,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	p_ramrod		= &p_ent->ramrod.tx_queue_start;
+	p_ramrod->vport_id	= abs_vport_id;
+
+	p_ramrod->sb_id			= cpu_to_le16(p_params->sb);
+	p_ramrod->sb_index		= p_params->sb_idx;
+	p_ramrod->stats_counter_id	= stats_id;
+	p_ramrod->tc			= p_pq_params->eth.tc;
+
+	p_ramrod->pbl_size		= cpu_to_le16(pbl_size);
+	p_ramrod->pbl_base_addr.hi	= DMA_HI_LE(pbl_addr);
+	p_ramrod->pbl_base_addr.lo	= DMA_LO_LE(pbl_addr);
+
+	pq_id			= qed_get_qm_pq(p_hwfn,
+						PROTOCOLID_ETH,
+						p_pq_params);
+	p_ramrod->qm_pq_id	= cpu_to_le16(pq_id);
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+			  u16 opaque_fid,
+			  struct qed_queue_start_common_params *p_params,
+			  dma_addr_t pbl_addr,
+			  u16 pbl_size,
+			  void __iomem **pp_doorbell)
+{
+	struct qed_hw_cid_data *p_tx_cid;
+	union qed_qm_pq_params pq_params;
+	u8 abs_stats_id = 0;
+	int rc;
+
+	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
+	if (rc)
+		return rc;
+
+	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
+	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
+	memset(&pq_params, 0, sizeof(pq_params));
+
+	/* Allocate a CID for the queue */
+	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+				 &p_tx_cid->cid);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+		return rc;
+	}
+	p_tx_cid->b_cid_allocated = true;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+		   opaque_fid, p_tx_cid->cid,
+		   p_params->queue_id, p_params->vport_id, p_params->sb);
+
+	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+					 opaque_fid,
+					 p_tx_cid->cid,
+					 p_params,
+					 abs_stats_id,
+					 pbl_addr,
+					 pbl_size,
+					 &pq_params);
+
+	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+				     qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+	if (rc)
+		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+	return rc;
+}
+
+static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
+				    u16 tx_queue_id)
+{
+	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+	struct qed_sp_init_request_params sp_params;
+	struct qed_spq_entry *p_ent = NULL;
+	int rc = -EINVAL;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data);
+	sp_params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 p_tx_cid->cid,
+				 p_tx_cid->opaque_fid,
+				 ETH_RAMROD_TX_QUEUE_STOP,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc)
+		return rc;
+
+	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
+}
+
+static enum eth_filter_action
+qed_filter_action(enum qed_filter_opcode opcode)
+{
+	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+	switch (opcode) {
+	case QED_FILTER_ADD:
+		action = ETH_FILTER_ACTION_ADD;
+		break;
+	case QED_FILTER_REMOVE:
+		action = ETH_FILTER_ACTION_REMOVE;
+		break;
+	case QED_FILTER_REPLACE:
+	case QED_FILTER_FLUSH:
+		action = ETH_FILTER_ACTION_REPLACE;
+		break;
+	default:
+		action = MAX_ETH_FILTER_ACTION;
+	}
+
+	return action;
+}
+
+static void qed_set_fw_mac_addr(__le16 *fw_msb,
+				__le16 *fw_mid,
+				__le16 *fw_lsb,
+				u8 *mac)
+{
+	((u8 *)fw_msb)[0] = mac[1];
+	((u8 *)fw_msb)[1] = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lsb)[0] = mac[5];
+	((u8 *)fw_lsb)[1] = mac[4];
+}
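+
+/* For example (illustrative): mac = 00:11:22:33:44:55 is stored so the
+ * bytes of fw_msb are {0x11, 0x00}, of fw_mid {0x33, 0x22} and of
+ * fw_lsb {0x55, 0x44}, i.e. each 16-bit firmware word holds one
+ * byte-swapped pair of the MAC.
+ */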
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+			u16 opaque_fid,
+			struct qed_filter_ucast *p_filter_cmd,
+			struct vport_filter_update_ramrod_data **pp_ramrod,
+			struct qed_spq_entry **pp_ent,
+			enum spq_mode comp_mode,
+			struct qed_spq_comp_cb *p_comp_data)
+{
+	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+	struct vport_filter_update_ramrod_data *p_ramrod;
+	struct qed_sp_init_request_params sp_params;
+	struct eth_filter_cmd *p_first_filter;
+	struct eth_filter_cmd *p_second_filter;
+	enum eth_filter_action action;
+	int rc;
+
+	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+			  &vport_to_remove_from);
+	if (rc)
+		return rc;
+
+	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+			  &vport_to_add_to);
+	if (rc)
+		return rc;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(**pp_ramrod);
+	sp_params.comp_mode = comp_mode;
+	sp_params.p_comp_data = p_comp_data;
+
+	rc = qed_sp_init_request(p_hwfn, pp_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 opaque_fid,
+				 ETH_RAMROD_FILTERS_UPDATE,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+	if (rc)
+		return rc;
+
+	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+	p_ramrod = *pp_ramrod;
+	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+	switch (p_filter_cmd->opcode) {
+	case QED_FILTER_FLUSH:
+		p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break;
+	case QED_FILTER_MOVE:
+		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+	default:
+		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+	}
+
+	p_first_filter	= &p_ramrod->filter_cmds[0];
+	p_second_filter = &p_ramrod->filter_cmds[1];
+
+	switch (p_filter_cmd->type) {
+	case QED_FILTER_MAC:
+		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+	case QED_FILTER_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+	case QED_FILTER_MAC_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+	case QED_FILTER_INNER_MAC:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+	case QED_FILTER_INNER_VLAN:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+	case QED_FILTER_INNER_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+	case QED_FILTER_INNER_MAC_VNI_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+		break;
+	case QED_FILTER_MAC_VNI_PAIR:
+		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+	case QED_FILTER_VNI:
+		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+	}
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+				    &p_first_filter->mac_mid,
+				    &p_first_filter->mac_lsb,
+				    (u8 *)p_filter_cmd->mac);
+	}
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+		p_second_filter->type		= p_first_filter->type;
+		p_second_filter->mac_msb	= p_first_filter->mac_msb;
+		p_second_filter->mac_mid	= p_first_filter->mac_mid;
+		p_second_filter->mac_lsb	= p_first_filter->mac_lsb;
+		p_second_filter->vlan_id	= p_first_filter->vlan_id;
+		p_second_filter->vni		= p_first_filter->vni;
+
+		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+		p_first_filter->vport_id = vport_to_remove_from;
+
+		p_second_filter->action		= ETH_FILTER_ACTION_ADD;
+		p_second_filter->vport_id	= vport_to_add_to;
+	} else {
+		action = qed_filter_action(p_filter_cmd->opcode);
+
+		if (action == MAX_ETH_FILTER_ACTION) {
+			DP_NOTICE(p_hwfn,
+				  "%d is not supported yet\n",
+				  p_filter_cmd->opcode);
+			return -EINVAL;
+		}
+
+		p_first_filter->action = action;
+		p_first_filter->vport_id = (p_filter_cmd->opcode ==
+					    QED_FILTER_REMOVE) ?
+					   vport_to_remove_from :
+					   vport_to_add_to;
+	}
+
+	return 0;
+}
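+
+/* e.g. (illustrative): a MOVE of a MAC from vport 1 to vport 2 goes out
+ * as two commands in one ramrod, cmd[0] REMOVE on vport 1 and cmd[1] ADD
+ * on vport 2, matching the cmd_cnt of 2 set above.
+ */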
+
+static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+				   u16 opaque_fid,
+				   struct qed_filter_ucast *p_filter_cmd,
+				   enum spq_mode comp_mode,
+				   struct qed_spq_comp_cb *p_comp_data)
+{
+	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
+	struct qed_spq_entry			*p_ent		= NULL;
+	struct eth_filter_cmd_header		*p_header;
+	int					rc;
+
+	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+				     &p_ramrod, &p_ent,
+				     comp_mode, p_comp_data);
+	if (rc != 0) {
+		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+		return rc;
+	}
+	p_header = &p_ramrod->filter_cmd_hdr;
+	p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+	rc = qed_spq_post(p_hwfn, p_ent, NULL);
+	if (rc != 0) {
+		DP_ERR(p_hwfn,
+		       "Unicast filter ADD command failed %d\n",
+		       rc);
+		return rc;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+		   "REMOVE" :
+		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+		    "MOVE" : "REPLACE")),
+		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+		    "VLAN" : "MAC & VLAN"),
+		   p_ramrod->filter_cmd_hdr.cmd_cnt,
+		   p_filter_cmd->is_rx_filter,
+		   p_filter_cmd->is_tx_filter);
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+		   p_filter_cmd->vport_to_add_to,
+		   p_filter_cmd->vport_to_remove_from,
+		   p_filter_cmd->mac[0],
+		   p_filter_cmd->mac[1],
+		   p_filter_cmd->mac[2],
+		   p_filter_cmd->mac[3],
+		   p_filter_cmd->mac[4],
+		   p_filter_cmd->mac[5],
+		   p_filter_cmd->vlan);
+
+	return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ *         Calculates CRC-32C over a buffer.
+ *         Note: crc32_length is in bytes and MUST be a multiple of 8.
+ * Return:
+ *         The computed CRC, or crc32_seed unchanged on invalid input.
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+			   u32 crc32_length,
+			   u32 crc32_seed,
+			   u8 complement)
+{
+	u32 byte = 0;
+	u32 bit = 0;
+	u8 msb = 0;
+	u8 current_byte = 0;
+	u32 crc32_result = crc32_seed;
+
+	if ((!crc32_packet) ||
+	    (crc32_length == 0) ||
+	    ((crc32_length % 8) != 0))
+		return crc32_result;
+	for (byte = 0; byte < crc32_length; byte++) {
+		current_byte = crc32_packet[byte];
+		for (bit = 0; bit < 8; bit++) {
+			msb = (u8)(crc32_result >> 31);
+			crc32_result = crc32_result << 1;
+			if (msb != (0x1 & (current_byte >> bit))) {
+				crc32_result = crc32_result ^ CRC32_POLY;
+				crc32_result |= 1; /*crc32_result[0] = 1;*/
+			}
+		}
+	}
+	return crc32_result;
+}
+
+static inline u32 qed_crc32c_le(u32 seed,
+				u8 *mac,
+				u32 len)
+{
+	u32 packet_buf[2] = { 0 };
+
+	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
+static u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+				mac, ETH_ALEN);
+
+	return crc & 0xff;
+}
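+
+/* The low byte of the CRC-32C over the (zero-padded) MAC selects one of
+ * 256 approximate-match bins; any two MACs whose hashes share that byte
+ * land in the same bin and are accepted together.
+ */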
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+			u16 opaque_fid,
+			struct qed_filter_mcast *p_filter_cmd,
+			enum spq_mode comp_mode,
+			struct qed_spq_comp_cb *p_comp_data)
+{
+	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+	struct vport_update_ramrod_data *p_ramrod = NULL;
+	struct qed_sp_init_request_params sp_params;
+	struct qed_spq_entry *p_ent = NULL;
+	u8 abs_vport_id = 0;
+	int rc, i;
+
+	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+				  &abs_vport_id);
+		if (rc)
+			return rc;
+	} else {
+		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+				  &abs_vport_id);
+		if (rc)
+			return rc;
+	}
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	sp_params.ramrod_data_size = sizeof(*p_ramrod);
+	sp_params.comp_mode = comp_mode;
+	sp_params.p_comp_data = p_comp_data;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 p_hwfn->hw_info.opaque_fid,
+				 ETH_RAMROD_VPORT_UPDATE,
+				 PROTOCOLID_ETH,
+				 &sp_params);
+
+	if (rc) {
+		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+		return rc;
+	}
+
+	p_ramrod = &p_ent->ramrod.vport_update;
+	p_ramrod->common.update_approx_mcast_flg = 1;
+
+	/* explicitly clear out the entire vector */
+	memset(&p_ramrod->approx_mcast.bins, 0,
+	       sizeof(p_ramrod->approx_mcast.bins));
+	memset(bins, 0, sizeof(unsigned long) *
+	       ETH_MULTICAST_MAC_BINS_IN_REGS);
+	/* The filter ADD op is an explicit set op; it removes
+	 * any existing filters for the vport.
+	 */
+	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+			u32 bit;
+
+			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+			__set_bit(bit, bins);
+		}
+
+		/* Convert to correct endianness */
+		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+			u32 *p_bins = (u32 *)bins;
+			struct vport_update_ramrod_mcast *approx_mcast;
+
+			approx_mcast = &p_ramrod->approx_mcast;
+			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
+		}
+	}
+
+	p_ramrod->common.vport_id = abs_vport_id;
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_filter_mcast_cmd(struct qed_dev *cdev,
+		     struct qed_filter_mcast *p_filter_cmd,
+		     enum spq_mode comp_mode,
+		     struct qed_spq_comp_cb *p_comp_data)
+{
+	int rc = 0;
+	int i;
+
+	/* only ADD and REMOVE operations are supported for multi-cast */
+	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+	     p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
+	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+		return -EINVAL;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		u16 opaque_fid;
+
+		if (rc != 0)
+			break;
+
+		opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+		rc = qed_sp_eth_filter_mcast(p_hwfn,
+					     opaque_fid,
+					     p_filter_cmd,
+					     comp_mode,
+					     p_comp_data);
+	}
+	return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+				struct qed_filter_ucast *p_filter_cmd,
+				enum spq_mode comp_mode,
+				struct qed_spq_comp_cb *p_comp_data)
+{
+	int rc = 0;
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		u16 opaque_fid;
+
+		if (rc != 0)
+			break;
+
+		opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+		rc = qed_sp_eth_filter_ucast(p_hwfn,
+					     opaque_fid,
+					     p_filter_cmd,
+					     comp_mode,
+					     p_comp_data);
+	}
+
+	return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+				 struct qed_dev_eth_info *info)
+{
+	int i;
+
+	memset(info, 0, sizeof(*info));
+
+	info->num_tc = 1;
+
+	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+		for_each_hwfn(cdev, i)
+			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
+						     QED_PF_L2_QUE);
+		if (cdev->int_params.fp_msix_cnt)
+			info->num_queues = min_t(u8, info->num_queues,
+						 cdev->int_params.fp_msix_cnt);
+	} else {
+		info->num_queues = cdev->num_hwfns;
+	}
+
+	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+	ether_addr_copy(info->port_mac,
+			cdev->hwfns[0].hw_info.hw_mac_addr);
+
+	qed_fill_dev_info(cdev, &info->common);
+
+	return 0;
+}
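+
+/* e.g. (illustrative): a 2-hwfn device with 8 L2 queues per hwfn and 12
+ * fastpath MSI-X vectors reports num_queues = min(8 + 8, 12) = 12 in
+ * MSI-X mode, and num_queues = 2 (one per hwfn) otherwise.
+ */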
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+				 struct qed_eth_cb_ops *ops,
+				 void *cookie)
+{
+	cdev->protocol_ops.eth	= ops;
+	cdev->ops_cookie	= cookie;
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+			   u8 vport_id,
+			   u16 mtu,
+			   u8 drop_ttl0_flg,
+			   u8 inner_vlan_removal_en_flg)
+{
+	int rc, i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		rc = qed_sp_vport_start(p_hwfn,
+					p_hwfn->hw_info.concrete_fid,
+					p_hwfn->hw_info.opaque_fid,
+					vport_id,
+					mtu,
+					drop_ttl0_flg,
+					inner_vlan_removal_en_flg);
+
+		if (rc) {
+			DP_ERR(cdev, "Failed to start VPORT\n");
+			return rc;
+		}
+
+		qed_hw_start_fastpath(p_hwfn);
+
+		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+			   "Started V-PORT %d with MTU %d\n",
+			   vport_id, mtu);
+	}
+
+	qed_reset_vport_stats(cdev);
+
+	return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev,
+			  u8 vport_id)
+{
+	int rc, i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		rc = qed_sp_vport_stop(p_hwfn,
+				       p_hwfn->hw_info.opaque_fid,
+				       vport_id);
+
+		if (rc) {
+			DP_ERR(cdev, "Failed to stop VPORT\n");
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+			    struct qed_update_vport_params *params)
+{
+	struct qed_sp_vport_update_params sp_params;
+	struct qed_rss_params sp_rss_params;
+	int rc, i;
+
+	if (!cdev)
+		return -ENODEV;
+
+	memset(&sp_params, 0, sizeof(sp_params));
+	memset(&sp_rss_params, 0, sizeof(sp_rss_params));
+
+	/* Translate protocol params into sp params */
+	sp_params.vport_id = params->vport_id;
+	sp_params.update_vport_active_rx_flg =
+		params->update_vport_active_flg;
+	sp_params.update_vport_active_tx_flg =
+		params->update_vport_active_flg;
+	sp_params.vport_active_rx_flg = params->vport_active_flg;
+	sp_params.vport_active_tx_flg = params->vport_active_flg;
+
+	/* RSS is a bit tricky, since the upper layer isn't familiar with
+	 * hwfns. We need to re-fix the RSS values per engine for CMT.
+	 */
+	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
+		struct qed_update_vport_rss_params *rss =
+			&params->rss_params;
+		int k, max = 0;
+
+		/* Find largest entry, since it's possible RSS needs to
+		 * be disabled [in case only 1 queue per-hwfn]
+		 */
+		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+			max = (max > rss->rss_ind_table[k]) ?
+				max : rss->rss_ind_table[k];
+
+		/* Either fix RSS values or disable RSS */
+		if (cdev->num_hwfns < max + 1) {
+			int divisor = (max + cdev->num_hwfns - 1) /
+				cdev->num_hwfns;
+
+			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+				   "CMT - fixing RSS values (modulo %02x)\n",
+				   divisor);
+
+			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
+				rss->rss_ind_table[k] =
+					rss->rss_ind_table[k] % divisor;
+		} else {
+			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
+			params->update_rss_flg = 0;
+		}
+	}
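+
+	/* e.g. (illustrative): with 2 hwfns and a largest indirection entry
+	 * of 7, the divisor is (7 + 2 - 1) / 2 = 4, so entries 0-7 are
+	 * remapped into 0-3 and stay within what one hwfn actually serves.
+	 */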
+
+	/* Now, update the RSS configuration for actual configuration */
+	if (params->update_rss_flg) {
+		sp_rss_params.update_rss_config = 1;
+		sp_rss_params.rss_enable = 1;
+		sp_rss_params.update_rss_capabilities = 1;
+		sp_rss_params.update_rss_ind_table = 1;
+		sp_rss_params.update_rss_key = 1;
+		sp_rss_params.rss_caps = QED_RSS_IPV4 |
+					 QED_RSS_IPV6 |
+					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
+		memcpy(sp_rss_params.rss_ind_table,
+		       params->rss_params.rss_ind_table,
+		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
+		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
+		       QED_RSS_KEY_SIZE * sizeof(u32));
+	}
+	sp_params.rss_params = &sp_rss_params;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+		rc = qed_sp_vport_update(p_hwfn, &sp_params,
+					 QED_SPQ_MODE_EBLOCK,
+					 NULL);
+		if (rc) {
+			DP_ERR(cdev, "Failed to update VPORT\n");
+			return rc;
+		}
+
+		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+			   "Updated V-PORT %d: active_flag %d [update %d]\n",
+			   params->vport_id, params->vport_active_flg,
+			   params->update_vport_active_flg);
+	}
+
+	return 0;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+			 struct qed_queue_start_common_params *params,
+			 u16 bd_max_bytes,
+			 dma_addr_t bd_chain_phys_addr,
+			 dma_addr_t cqe_pbl_addr,
+			 u16 cqe_pbl_size,
+			 void __iomem **pp_prod)
+{
+	int rc, hwfn_index;
+	struct qed_hwfn *p_hwfn;
+
+	hwfn_index = params->rss_id % cdev->num_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
+
+	/* Fix queue ID in 100g mode */
+	params->queue_id /= cdev->num_hwfns;
+
+	rc = qed_sp_eth_rx_queue_start(p_hwfn,
+				       p_hwfn->hw_info.opaque_fid,
+				       params,
+				       bd_max_bytes,
+				       bd_chain_phys_addr,
+				       cqe_pbl_addr,
+				       cqe_pbl_size,
+				       pp_prod);
+
+	if (rc) {
+		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
+		return rc;
+	}
+
+	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+		   params->queue_id, params->rss_id, params->vport_id,
+		   params->sb);
+
+	return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev,
+			struct qed_stop_rxq_params *params)
+{
+	int rc, hwfn_index;
+	struct qed_hwfn *p_hwfn;
+
+	hwfn_index	= params->rss_id % cdev->num_hwfns;
+	p_hwfn		= &cdev->hwfns[hwfn_index];
+
+	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+				      params->rx_queue_id / cdev->num_hwfns,
+				      params->eq_completion_only,
+				      false);
+	if (rc) {
+		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+			 struct qed_queue_start_common_params *p_params,
+			 dma_addr_t pbl_addr,
+			 u16 pbl_size,
+			 void __iomem **pp_doorbell)
+{
+	struct qed_hwfn *p_hwfn;
+	int rc, hwfn_index;
+
+	hwfn_index	= p_params->rss_id % cdev->num_hwfns;
+	p_hwfn		= &cdev->hwfns[hwfn_index];
+
+	/* Fix queue ID in 100g mode */
+	p_params->queue_id /= cdev->num_hwfns;
+
+	rc = qed_sp_eth_tx_queue_start(p_hwfn,
+				       p_hwfn->hw_info.opaque_fid,
+				       p_params,
+				       pbl_addr,
+				       pbl_size,
+				       pp_doorbell);
+
+	if (rc) {
+		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+		return rc;
+	}
+
+	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
+		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
+		   p_params->sb);
+
+	return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+	qed_hw_stop_fastpath(cdev);
+
+	return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev,
+			struct qed_stop_txq_params *params)
+{
+	struct qed_hwfn *p_hwfn;
+	int rc, hwfn_index;
+
+	hwfn_index	= params->rss_id % cdev->num_hwfns;
+	p_hwfn		= &cdev->hwfns[hwfn_index];
+
+	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+				      params->tx_queue_id / cdev->num_hwfns);
+	if (rc) {
+		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+					enum qed_filter_rx_mode_type type)
+{
+	struct qed_filter_accept_flags accept_flags;
+
+	memset(&accept_flags, 0, sizeof(accept_flags));
+
+	accept_flags.update_rx_mode_config	= 1;
+	accept_flags.update_tx_mode_config	= 1;
+	accept_flags.rx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
+						  QED_ACCEPT_MCAST_MATCHED |
+						  QED_ACCEPT_BCAST;
+	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+					QED_ACCEPT_MCAST_MATCHED |
+					QED_ACCEPT_BCAST;
+
+	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+						 QED_ACCEPT_MCAST_UNMATCHED;
+	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+
+	return qed_filter_accept_cmd(cdev, 0, accept_flags,
+				     QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+				      struct qed_filter_ucast_params *params)
+{
+	struct qed_filter_ucast ucast;
+
+	if (!params->vlan_valid && !params->mac_valid) {
+		DP_NOTICE(cdev,
+			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
+		return -EINVAL;
+	}
+
+	memset(&ucast, 0, sizeof(ucast));
+	switch (params->type) {
+	case QED_FILTER_XCAST_TYPE_ADD:
+		ucast.opcode = QED_FILTER_ADD;
+		break;
+	case QED_FILTER_XCAST_TYPE_DEL:
+		ucast.opcode = QED_FILTER_REMOVE;
+		break;
+	case QED_FILTER_XCAST_TYPE_REPLACE:
+		ucast.opcode = QED_FILTER_REPLACE;
+		break;
+	default:
+		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+	}
+
+	if (params->vlan_valid && params->mac_valid) {
+		ucast.type = QED_FILTER_MAC_VLAN;
+		ether_addr_copy(ucast.mac, params->mac);
+		ucast.vlan = params->vlan;
+	} else if (params->mac_valid) {
+		ucast.type = QED_FILTER_MAC;
+		ether_addr_copy(ucast.mac, params->mac);
+	} else {
+		ucast.type = QED_FILTER_VLAN;
+		ucast.vlan = params->vlan;
+	}
+
+	ucast.is_rx_filter = true;
+	ucast.is_tx_filter = true;
+
+	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+				      struct qed_filter_mcast_params *params)
+{
+	struct qed_filter_mcast mcast;
+	int i;
+
+	memset(&mcast, 0, sizeof(mcast));
+	switch (params->type) {
+	case QED_FILTER_XCAST_TYPE_ADD:
+		mcast.opcode = QED_FILTER_ADD;
+		break;
+	case QED_FILTER_XCAST_TYPE_DEL:
+		mcast.opcode = QED_FILTER_REMOVE;
+		break;
+	default:
+		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+			  params->type);
+		return -EINVAL;
+	}
+
+	mcast.num_mc_addrs = params->num;
+	for (i = 0; i < mcast.num_mc_addrs; i++)
+		ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+	return qed_filter_mcast_cmd(cdev, &mcast,
+				    QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter(struct qed_dev *cdev,
+				struct qed_filter_params *params)
+{
+	enum qed_filter_rx_mode_type accept_flags;
+
+	switch (params->type) {
+	case QED_FILTER_TYPE_UCAST:
+		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
+	case QED_FILTER_TYPE_MCAST:
+		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
+	case QED_FILTER_TYPE_RX_MODE:
+		accept_flags = params->filter.accept_flags;
+		return qed_configure_filter_rx_mode(cdev, accept_flags);
+	default:
+		DP_NOTICE(cdev, "Unknown filter type %d\n",
+			  (int)params->type);
+		return -EINVAL;
+	}
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+				 u8 rss_id,
+				 struct eth_slow_path_rx_cqe *cqe)
+{
+	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+				      cqe);
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+	.common = &qed_common_ops_pass,
+	.fill_dev_info = &qed_fill_eth_dev_info,
+	.register_ops = &qed_register_eth_ops,
+	.vport_start = &qed_start_vport,
+	.vport_stop = &qed_stop_vport,
+	.vport_update = &qed_update_vport,
+	.q_rx_start = &qed_start_rxq,
+	.q_rx_stop = &qed_stop_rxq,
+	.q_tx_start = &qed_start_txq,
+	.q_tx_stop = &qed_stop_txq,
+	.filter_config = &qed_configure_filter,
+	.fastpath_stop = &qed_fastpath_stop,
+	.eth_cqe_completion = &qed_fp_cqe_completion,
+	.get_vport_stats = &qed_get_vport_stats,
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+{
+	if (version != QED_ETH_INTERFACE_VERSION) {
+		pr_notice("Cannot supply eth ops [%08x != %08x]\n",
+			  version, QED_ETH_INTERFACE_VERSION);
+		return NULL;
+	}
+
+	return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+	/* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 0000000..947c7af
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,1169 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/qed/qed_if.h>
+
+#include "qed.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_hw.h"
+
+static const char version[] =
+	"QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define FW_FILE_VERSION				\
+	__stringify(FW_MAJOR_VERSION) "."	\
+	__stringify(FW_MINOR_VERSION) "."	\
+	__stringify(FW_REVISION_VERSION) "."	\
+	__stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME	\
+	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+static int __init qed_init(void)
+{
+	pr_notice("qed_init called\n");
+
+	pr_info("%s", version);
+
+	return 0;
+}
+
+static void __exit qed_cleanup(void)
+{
+	pr_notice("qed_cleanup called\n");
+}
+
+module_init(qed_init);
+module_exit(qed_cleanup);
+
+/* Check if the DMA controller on the machine can properly handle the DMA
+ * addressing required by the device.
+ */
+static int qed_set_coherency_mask(struct qed_dev *cdev)
+{
+	struct device *dev = &cdev->pdev->dev;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+			DP_NOTICE(cdev,
+				  "Can't request 64-bit consistent allocations\n");
+			return -EIO;
+		}
+	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+	struct pci_dev *pdev = cdev->pdev;
+
+	if (cdev->doorbells)
+		iounmap(cdev->doorbells);
+	if (cdev->regview)
+		iounmap(cdev->regview);
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/* Performs PCI initializations as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev,
+			struct pci_dev *pdev)
+{
+	int rc;
+
+	cdev->pdev = pdev;
+
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		DP_NOTICE(cdev, "Cannot enable PCI device\n");
+		goto err0;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		DP_NOTICE(cdev, "No memory region found in bar #0\n");
+		rc = -EIO;
+		goto err1;
+	}
+
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		DP_NOTICE(cdev, "No memory region found in bar #2\n");
+		rc = -EIO;
+		goto err1;
+	}
+
+	if (atomic_read(&pdev->enable_cnt) == 1) {
+		rc = pci_request_regions(pdev, "qed");
+		if (rc) {
+			DP_NOTICE(cdev,
+				  "Failed to request PCI memory resources\n");
+			goto err1;
+		}
+		pci_set_master(pdev);
+		pci_save_state(pdev);
+	}
+
+	if (!pci_is_pcie(pdev)) {
+		DP_NOTICE(cdev, "The bus is not PCI Express\n");
+		rc = -EIO;
+		goto err2;
+	}
+
+	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (cdev->pci_params.pm_cap == 0)
+		DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+	rc = qed_set_coherency_mask(cdev);
+	if (rc)
+		goto err2;
+
+	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+	cdev->pci_params.irq = pdev->irq;
+
+	cdev->regview = pci_ioremap_bar(pdev, 0);
+	if (!cdev->regview) {
+		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+	cdev->db_size = pci_resource_len(cdev->pdev, 2);
+	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+	if (!cdev->doorbells) {
+		DP_NOTICE(cdev, "Cannot map doorbell space\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+
+err2:
+	pci_release_regions(pdev);
+err1:
+	pci_disable_device(pdev);
+err0:
+	return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+		      struct qed_dev_info *dev_info)
+{
+	struct qed_ptt  *ptt;
+
+	memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+	dev_info->num_hwfns = cdev->num_hwfns;
+	dev_info->pci_mem_start = cdev->pci_params.mem_start;
+	dev_info->pci_mem_end = cdev->pci_params.mem_end;
+	dev_info->pci_irq = cdev->pci_params.irq;
+	dev_info->is_mf = IS_MF(&cdev->hwfns[0]);
+	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
+
+	dev_info->fw_major = FW_MAJOR_VERSION;
+	dev_info->fw_minor = FW_MINOR_VERSION;
+	dev_info->fw_rev = FW_REVISION_VERSION;
+	dev_info->fw_eng = FW_ENGINEERING_VERSION;
+	dev_info->mf_mode = cdev->mf_mode;
+
+	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+
+	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+	if (ptt) {
+		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+				       &dev_info->flash_size);
+
+		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+	}
+
+	return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+	kfree(cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+	struct qed_dev *cdev;
+
+	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+	if (!cdev)
+		return cdev;
+
+	qed_init_struct(cdev);
+
+	return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev,
+			       pci_power_t state)
+{
+	if (!cdev)
+		return -ENODEV;
+
+	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+	return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+				 enum qed_protocol protocol,
+				 u32 dp_module,
+				 u8 dp_level)
+{
+	struct qed_dev *cdev;
+	int rc;
+
+	cdev = qed_alloc_cdev(pdev);
+	if (!cdev)
+		goto err0;
+
+	cdev->protocol = protocol;
+
+	qed_init_dp(cdev, dp_module, dp_level);
+
+	rc = qed_init_pci(cdev, pdev);
+	if (rc) {
+		DP_ERR(cdev, "init pci failed\n");
+		goto err1;
+	}
+	DP_INFO(cdev, "PCI init completed successfully\n");
+
+	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+	if (rc) {
+		DP_ERR(cdev, "hw prepare failed\n");
+		goto err2;
+	}
+
+	DP_INFO(cdev, "qed_probe completed successfully\n");
+
+	return cdev;
+
+err2:
+	qed_free_pci(cdev);
+err1:
+	qed_free_cdev(cdev);
+err0:
+	return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+	if (!cdev)
+		return;
+
+	qed_hw_remove(cdev);
+
+	qed_free_pci(cdev);
+
+	qed_set_power_state(cdev, PCI_D3hot);
+
+	qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+		pci_disable_msix(cdev->pdev);
+		kfree(cdev->int_params.msix_table);
+	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+		pci_disable_msi(cdev->pdev);
+	}
+
+	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+			   struct qed_int_params *int_params)
+{
+	int i, rc, cnt;
+
+	cnt = int_params->in.num_vectors;
+
+	for (i = 0; i < cnt; i++)
+		int_params->msix_table[i].entry = i;
+
+	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+				   int_params->in.min_msix_cnt, cnt);
+	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+	    (rc % cdev->num_hwfns)) {
+		pci_disable_msix(cdev->pdev);
+
+		/* If fastpath is initialized, we need at least one interrupt
+		 * per hwfn [and the slow path interrupts]. New requested number
+		 * should be a multiple of the number of hwfns.
+		 */
+		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+		DP_NOTICE(cdev,
+			  "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
+			  cnt, int_params->in.num_vectors);
+		rc = pci_enable_msix_exact(cdev->pdev,
+					   int_params->msix_table, cnt);
+		if (!rc)
+			rc = cnt;
+	}
+
+	if (rc > 0) {
+		/* MSI-X configuration was achieved */
+		int_params->out.int_mode = QED_INT_MODE_MSIX;
+		int_params->out.num_vectors = rc;
+		rc = 0;
+	} else {
+		DP_NOTICE(cdev,
+			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+			  cnt, rc);
+	}
+
+	return rc;
+}
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+	struct qed_int_params *int_params = &cdev->int_params;
+	struct msix_entry *tbl;
+	int rc = 0, cnt;
+
+	switch (int_params->in.int_mode) {
+	case QED_INT_MODE_MSIX:
+		/* Allocate MSIX table */
+		cnt = int_params->in.num_vectors;
+		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+		if (!int_params->msix_table) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		/* Enable MSIX */
+		rc = qed_enable_msix(cdev, int_params);
+		if (!rc)
+			goto out;
+
+		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+		kfree(int_params->msix_table);
+		if (force_mode)
+			goto out;
+		/* Fallthrough */
+
+	case QED_INT_MODE_MSI:
+		rc = pci_enable_msi(cdev->pdev);
+		if (!rc) {
+			int_params->out.int_mode = QED_INT_MODE_MSI;
+			goto out;
+		}
+
+		DP_NOTICE(cdev, "Failed to enable MSI\n");
+		if (force_mode)
+			goto out;
+		/* Fallthrough */
+
+	case QED_INT_MODE_INTA:
+		int_params->out.int_mode = QED_INT_MODE_INTA;
+		rc = 0;
+		goto out;
+	default:
+		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+			  int_params->in.int_mode);
+		rc = -EINVAL;
+	}
+
+out:
+	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+	return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+				    int index, void(*handler)(void *))
+{
+	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+	int relative_idx = index / cdev->num_hwfns;
+
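+	/* Fastpath vectors are striped across the hwfns just like the
+	 * queues: engine = index % num_hwfns, slot = index / num_hwfns.
+	 */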
+	hwfn->simd_proto_handler[relative_idx].func = handler;
+	hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+	int relative_idx = index / cdev->num_hwfns;
+
+	memset(&hwfn->simd_proto_handler[relative_idx], 0,
+	       sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+	tasklet_schedule((struct tasklet_struct *)tasklet);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+	struct qed_hwfn *hwfn;
+	irqreturn_t rc = IRQ_NONE;
+	u64 status;
+	int i, j;
+
+	for (i = 0; i < cdev->num_hwfns; i++) {
+		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+		if (!status)
+			continue;
+
+		hwfn = &cdev->hwfns[i];
+
+		/* Slowpath interrupt */
+		if (unlikely(status & 0x1)) {
+			tasklet_schedule(hwfn->sp_dpc);
+			status &= ~0x1;
+			rc = IRQ_HANDLED;
+		}
+
+		/* Fastpath interrupts */
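+		/* Bit 0 was slowpath; bit (j + 1) selects simd handler j */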
+		for (j = 0; j < 64; j++) {
+			if ((0x2ULL << j) & status) {
+				hwfn->simd_proto_handler[j].func(
+					hwfn->simd_proto_handler[j].token);
+				status &= ~(0x2ULL << j);
+				rc = IRQ_HANDLED;
+			}
+		}
+
+		if (unlikely(status))
+			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+				   "got an unknown interrupt status 0x%llx\n",
+				   status);
+	}
+
+	return rc;
+}
+
+static int qed_slowpath_irq_req(struct qed_dev *cdev)
+{
+	int i = 0, rc = 0;
+
+	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+		/* Request all the slowpath MSI-X vectors */
+		for (i = 0; i < cdev->num_hwfns; i++) {
+			snprintf(cdev->hwfns[i].name, NAME_SIZE,
+				 "sp-%d-%02x:%02x.%02x",
+				 i, cdev->pdev->bus->number,
+				 PCI_SLOT(cdev->pdev->devfn),
+				 cdev->hwfns[i].abs_pf_id);
+
+			rc = request_irq(cdev->int_params.msix_table[i].vector,
+					 qed_msix_sp_int, 0,
+					 cdev->hwfns[i].name,
+					 cdev->hwfns[i].sp_dpc);
+			if (rc)
+				break;
+
+			DP_VERBOSE(&cdev->hwfns[i],
+				   (NETIF_MSG_INTR | QED_MSG_SP),
+				   "Requested slowpath MSI-X\n");
+		}
+
+		if (i != cdev->num_hwfns) {
+			/* Free already requested MSI-X vectors */
+			for (i--; i >= 0; i--) {
+				unsigned int vec =
+					cdev->int_params.msix_table[i].vector;
+				synchronize_irq(vec);
+				free_irq(vec, cdev->hwfns[i].sp_dpc);
+			}
+		}
+	} else {
+		unsigned long flags = 0;
+
+		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+			 PCI_FUNC(cdev->pdev->devfn));
+
+		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+			flags |= IRQF_SHARED;
+
+		rc = request_irq(cdev->pdev->irq, qed_single_int,
+				 flags, cdev->name, cdev);
+	}
+
+	return rc;
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+	int i;
+
+	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+		for_each_hwfn(cdev, i) {
+			synchronize_irq(cdev->int_params.msix_table[i].vector);
+			free_irq(cdev->int_params.msix_table[i].vector,
+				 cdev->hwfns[i].sp_dpc);
+		}
+	} else {
+		free_irq(cdev->pdev->irq, cdev);
+	}
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+	int i, rc;
+
+	rc = qed_hw_stop(cdev);
+
+	for (i = 0; i < cdev->num_hwfns; i++) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		if (p_hwfn->b_sp_dpc_enabled) {
+			tasklet_disable(p_hwfn->sp_dpc);
+			p_hwfn->b_sp_dpc_enabled = false;
+			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+				   "Disabled sp tasklet [hwfn %d] at %p\n",
+				   i, p_hwfn->sp_dpc);
+		}
+	}
+
+	return rc;
+}
+
+static int qed_nic_reset(struct qed_dev *cdev)
+{
+	int rc;
+
+	rc = qed_hw_reset(cdev);
+	if (rc)
+		return rc;
+
+	qed_resc_free(cdev);
+
+	return 0;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+	int rc;
+
+	rc = qed_resc_alloc(cdev);
+	if (rc)
+		return rc;
+
+	DP_INFO(cdev, "Allocated qed resources\n");
+
+	qed_resc_setup(cdev);
+
+	return rc;
+}
+
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+	int limit = 0;
+
+	/* Mark the fastpath as free/used */
+	cdev->int_params.fp_initialized = cnt ? true : false;
+
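+	/* Without MSI-X a single IRQ serves all status blocks; the 64-bit
+	 * SISR status leaves 63 fastpath bits per hwfn (bit 0 is slowpath).
+	 */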
+	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+		limit = cdev->num_hwfns * 63;
+	else if (cdev->int_params.fp_msix_cnt)
+		limit = cdev->int_params.fp_msix_cnt;
+
+	if (!limit)
+		return -ENOMEM;
+
+	return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+	memset(info, 0, sizeof(struct qed_int_info));
+
+	if (!cdev->int_params.fp_initialized) {
+		DP_INFO(cdev,
+			"Protocol driver requested interrupt information, but interrupt support is not yet configured\n");
+		return -EINVAL;
+	}
+
+	/* Need to expose only MSI-X information; Single IRQ is handled solely
+	 * by qed.
+	 */
+	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+		int msix_base = cdev->int_params.fp_msix_base;
+
+		info->msix_cnt = cdev->int_params.fp_msix_cnt;
+		info->msix = &cdev->int_params.msix_table[msix_base];
+	}
+
+	return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+				  enum qed_int_mode int_mode)
+{
+	int rc, i;
+	u8 num_vectors = 0;
+
+	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+
+	cdev->int_params.in.int_mode = int_mode;
+	for_each_hwfn(cdev, i)
+		num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1;
+	cdev->int_params.in.num_vectors = num_vectors;
+
+	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
+	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+	rc = qed_set_int_mode(cdev, false);
+	if (rc) {
+		DP_ERR(cdev, "Failed to set up the slowpath interrupt mode\n");
+		return rc;
+	}
+
+	cdev->int_params.fp_msix_base = cdev->num_hwfns;
+	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+				       cdev->num_hwfns;
+
+	return 0;
+}
+
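+/* Inflate a zlib-compressed firmware buffer into unzip_buf; returns the
+ * number of dwords written, or 0 on error.
+ */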
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+	int rc;
+
+	p_hwfn->stream->next_in = input_buf;
+	p_hwfn->stream->avail_in = input_len;
+	p_hwfn->stream->next_out = unzip_buf;
+	p_hwfn->stream->avail_out = max_size;
+
+	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+	if (rc != Z_OK) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+			   rc);
+		return 0;
+	}
+
+	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+	zlib_inflateEnd(p_hwfn->stream);
+
+	if (rc != Z_OK && rc != Z_STREAM_END) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+			   p_hwfn->stream->msg, rc);
+		return 0;
+	}
+
+	return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+	int i;
+	void *workspace;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+		if (!p_hwfn->stream)
+			return -ENOMEM;
+
+		workspace = vzalloc(zlib_inflate_workspacesize());
+		if (!workspace)
+			return -ENOMEM;
+		p_hwfn->stream->workspace = workspace;
+	}
+
+	return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		if (!p_hwfn->stream)
+			return;
+
+		vfree(p_hwfn->stream->workspace);
+		kfree(p_hwfn->stream);
+	}
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+				 struct qed_pf_params *params)
+{
+	int i;
+
+	for (i = 0; i < cdev->num_hwfns; i++) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		p_hwfn->pf_params = *params;
+	}
+}
+
+static int qed_slowpath_start(struct qed_dev *cdev,
+			      struct qed_slowpath_params *params)
+{
+	struct qed_mcp_drv_version drv_version;
+	const u8 *data = NULL;
+	struct qed_hwfn *hwfn;
+	int rc;
+
+	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+			      &cdev->pdev->dev);
+	if (rc) {
+		DP_NOTICE(cdev,
+			  "Failed to find fw file - /lib/firmware/%s\n",
+			  QED_FW_FILE_NAME);
+		goto err;
+	}
+
+	rc = qed_nic_setup(cdev);
+	if (rc)
+		goto err;
+
+	rc = qed_slowpath_setup_int(cdev, params->int_mode);
+	if (rc)
+		goto err1;
+
+	/* Request the slowpath IRQ */
+	rc = qed_slowpath_irq_req(cdev);
+	if (rc)
+		goto err2;
+
+	/* Allocate stream for unzipping */
+	rc = qed_alloc_stream_mem(cdev);
+	if (rc) {
+		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+		goto err3;
+	}
+
+	/* Start the slowpath */
+	data = cdev->firmware->data;
+
+	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+			 true, data);
+	if (rc)
+		goto err3;
+
+	DP_INFO(cdev,
+		"HW initialization and function start completed successfully\n");
+
+	hwfn = QED_LEADING_HWFN(cdev);
+	drv_version.version = (params->drv_major << 24) |
+			      (params->drv_minor << 16) |
+			      (params->drv_rev << 8) |
+			      (params->drv_eng);
+	strlcpy(drv_version.name, params->name,
+		MCP_DRV_VER_STR_SIZE - 4);
+	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+				      &drv_version);
+	if (rc) {
+		DP_NOTICE(cdev, "Failed sending drv version command\n");
+		return rc;
+	}
+
+	return 0;
+
+err3:
+	qed_free_stream_mem(cdev);
+	qed_slowpath_irq_free(cdev);
+err2:
+	qed_disable_msix(cdev);
+err1:
+	qed_resc_free(cdev);
+err:
+	release_firmware(cdev->firmware);
+
+	return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+	if (!cdev)
+		return -ENODEV;
+
+	qed_free_stream_mem(cdev);
+
+	qed_nic_stop(cdev);
+	qed_slowpath_irq_free(cdev);
+
+	qed_disable_msix(cdev);
+	qed_nic_reset(cdev);
+
+	release_firmware(cdev->firmware);
+
+	return 0;
+}
+
+static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
+		       char ver_str[VER_SIZE])
+{
+	int i;
+
+	memcpy(cdev->name, name, NAME_SIZE);
+	for_each_hwfn(cdev, i)
+		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+
+	memcpy(cdev->ver_str, ver_str, VER_SIZE);
+	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+		       struct qed_sb_info *sb_info,
+		       void *sb_virt_addr,
+		       dma_addr_t sb_phy_addr, u16 sb_id,
+		       enum qed_sb_type type)
+{
+	struct qed_hwfn *p_hwfn;
+	int hwfn_index;
+	u16 rel_sb_id;
+	u8 n_hwfns;
+	u32 rc;
+
+	/* Only L2 queues are spread across both engines in CMT (100G) mode;
+	 * RoCE and storage always use engine 0, so force a single hwfn for
+	 * any non-L2 status block.
+	 */
+	if (type == QED_SB_TYPE_L2_QUEUE)
+		n_hwfns = cdev->num_hwfns;
+	else
+		n_hwfns = 1;
+
+	hwfn_index = sb_id % n_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
+	rel_sb_id = sb_id / n_hwfns;
+
+	DP_VERBOSE(cdev, NETIF_MSG_INTR,
+		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+		   hwfn_index, rel_sb_id, sb_id);
+
+	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
+			     sb_virt_addr, sb_phy_addr, rel_sb_id);
+
+	return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+			  struct qed_sb_info *sb_info,
+			  u16 sb_id)
+{
+	struct qed_hwfn *p_hwfn;
+	int hwfn_index;
+	u16 rel_sb_id;
+	u32 rc;
+
+	hwfn_index = sb_id % cdev->num_hwfns;
+	p_hwfn = &cdev->hwfns[hwfn_index];
+	rel_sb_id = sb_id / cdev->num_hwfns;
+
+	DP_VERBOSE(cdev, NETIF_MSG_INTR,
+		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
+		   hwfn_index, rel_sb_id, sb_id);
+
+	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+	return rc;
+}
+
+static int qed_set_link(struct qed_dev *cdev,
+			struct qed_link_params *params)
+{
+	struct qed_hwfn *hwfn;
+	struct qed_mcp_link_params *link_params;
+	struct qed_ptt *ptt;
+	int rc;
+
+	if (!cdev)
+		return -ENODEV;
+
+	/* The link should be set only once per PF */
+	hwfn = &cdev->hwfns[0];
+
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt)
+		return -EBUSY;
+
+	link_params = qed_mcp_get_link_params(hwfn);
+	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+		link_params->speed.autoneg = params->autoneg;
+	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+		link_params->speed.advertised_speeds = 0;
+		if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
+		    (params->adv_speeds & SUPPORTED_1000baseT_Full))
+			link_params->speed.advertised_speeds |=
+				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+		if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
+			link_params->speed.advertised_speeds |=
+				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+		if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
+			link_params->speed.advertised_speeds |=
+				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
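+		/* ethtool currently defines no SUPPORTED_* bits for 50G and
+		 * 100G, hence the always-false '& 0' placeholders below.
+		 */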
+		if (params->adv_speeds & 0)
+			link_params->speed.advertised_speeds |=
+				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+		if (params->adv_speeds & 0)
+			link_params->speed.advertised_speeds |=
+				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+	}
+	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+		link_params->speed.forced_speed = params->forced_speed;
+
+	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+	qed_ptt_release(hwfn, ptt);
+
+	return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+	int port_type;
+
+	switch (media_type) {
+	case MEDIA_SFPP_10G_FIBER:
+	case MEDIA_SFP_1G_FIBER:
+	case MEDIA_XFP_FIBER:
+	case MEDIA_KR:
+		port_type = PORT_FIBRE;
+		break;
+	case MEDIA_DA_TWINAX:
+		port_type = PORT_DA;
+		break;
+	case MEDIA_BASE_T:
+		port_type = PORT_TP;
+		break;
+	case MEDIA_NOT_PRESENT:
+		port_type = PORT_NONE;
+		break;
+	case MEDIA_UNSPECIFIED:
+	default:
+		port_type = PORT_OTHER;
+		break;
+	}
+	return port_type;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+			  struct qed_link_output *if_link)
+{
+	struct qed_mcp_link_params params;
+	struct qed_mcp_link_state link;
+	struct qed_mcp_link_capabilities link_caps;
+	u32 media_type;
+
+	memset(if_link, 0, sizeof(*if_link));
+
+	/* Prepare source inputs */
+	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
+	       sizeof(link_caps));
+
+	/* Set the link parameters to pass to protocol driver */
+	if (link.link_up)
+		if_link->link_up = true;
+
+	/* TODO - at the moment assume supported and advertised speed equal */
+	if_link->supported_caps = SUPPORTED_FIBRE;
+	if (params.speed.autoneg)
+		if_link->supported_caps |= SUPPORTED_Autoneg;
+	if (params.pause.autoneg ||
+	    (params.pause.forced_rx && params.pause.forced_tx))
+		if_link->supported_caps |= SUPPORTED_Asym_Pause;
+	if (params.pause.autoneg || params.pause.forced_rx ||
+	    params.pause.forced_tx)
+		if_link->supported_caps |= SUPPORTED_Pause;
+
+	if_link->advertised_caps = if_link->supported_caps;
+	if (params.speed.advertised_speeds &
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+		if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
+					   SUPPORTED_1000baseT_Full;
+	if (params.speed.advertised_speeds &
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+		if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
+	if (params.speed.advertised_speeds &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+		if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
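+	/* As in qed_set_link above, ethtool has no SUPPORTED_* bits yet for
+	 * 50G/100G, so the '|= 0' statements here and below are placeholders.
+	 */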
+	if (params.speed.advertised_speeds &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+		if_link->advertised_caps |= 0;
+	if (params.speed.advertised_speeds &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+		if_link->advertised_caps |= 0;
+
+	if (link_caps.speed_capabilities &
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+		if_link->supported_caps |= SUPPORTED_1000baseT_Half |
+					   SUPPORTED_1000baseT_Full;
+	if (link_caps.speed_capabilities &
+	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+		if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
+	if (link_caps.speed_capabilities &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+		if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
+	if (link_caps.speed_capabilities &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+		if_link->supported_caps |= 0;
+	if (link_caps.speed_capabilities &
+		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+		if_link->supported_caps |= 0;
+
+	if (link.link_up)
+		if_link->speed = link.speed;
+
+	/* TODO - fill duplex properly */
+	if_link->duplex = DUPLEX_FULL;
+	qed_mcp_get_media_type(hwfn->cdev, &media_type);
+	if_link->port = qed_get_port_type(media_type);
+
+	if_link->autoneg = params.speed.autoneg;
+
+	if (params.pause.autoneg)
+		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+	if (params.pause.forced_rx)
+		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+	if (params.pause.forced_tx)
+		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+	/* Link partner capabilities */
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_1G_HD)
+		if_link->lp_caps |= SUPPORTED_1000baseT_Half;
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_1G_FD)
+		if_link->lp_caps |= SUPPORTED_1000baseT_Full;
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_10G)
+		if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_40G)
+		if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_50G)
+		if_link->lp_caps |= 0;
+	if (link.partner_adv_speed &
+	    QED_LINK_PARTNER_SPEED_100G)
+		if_link->lp_caps |= 0;
+
+	if (link.an_complete)
+		if_link->lp_caps |= SUPPORTED_Autoneg;
+
+	if (link.partner_adv_pause)
+		if_link->lp_caps |= SUPPORTED_Pause;
+	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+		if_link->lp_caps |= SUPPORTED_Asym_Pause;
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+				 struct qed_link_output *if_link)
+{
+	qed_fill_link(&cdev->hwfns[0], if_link);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn)
+{
+	void *cookie = hwfn->cdev->ops_cookie;
+	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+	struct qed_link_output if_link;
+
+	qed_fill_link(hwfn, &if_link);
+
+	if (IS_LEAD_HWFN(hwfn) && cookie)
+		op->link_update(cookie, &if_link);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn;
+	struct qed_ptt *ptt;
+	int i, rc;
+
+	for_each_hwfn(cdev, i) {
+		hwfn = &cdev->hwfns[i];
+		ptt = qed_ptt_acquire(hwfn);
+		if (!ptt) {
+			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+			return -EBUSY;
+		}
+		rc = qed_mcp_drain(hwfn, ptt);
+		qed_ptt_release(hwfn, ptt);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+const struct qed_common_ops qed_common_ops_pass = {
+	.probe = &qed_probe,
+	.remove = &qed_remove,
+	.set_power_state = &qed_set_power_state,
+	.set_id = &qed_set_id,
+	.update_pf_params = &qed_update_pf_params,
+	.slowpath_start = &qed_slowpath_start,
+	.slowpath_stop = &qed_slowpath_stop,
+	.set_fp_int = &qed_set_int_fp,
+	.get_fp_int = &qed_get_int_fp,
+	.sb_init = &qed_sb_init,
+	.sb_release = &qed_sb_release,
+	.simd_handler_config = &qed_simd_handler_config,
+	.simd_handler_clean = &qed_simd_handler_clean,
+	.set_link = &qed_set_link,
+	.get_link = &qed_get_current_link,
+	.drain = &qed_drain,
+	.update_msglvl = &qed_init_dp,
+	.chain_alloc = &qed_chain_alloc,
+	.chain_free = &qed_chain_free,
+};
+
+u32 qed_get_protocol_version(enum qed_protocol protocol)
+{
+	switch (protocol) {
+	case QED_PROTOCOL_ETH:
+		return QED_ETH_INTERFACE_VERSION;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 0000000..20d048c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#define CHIP_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
+
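+/* Helpers for accessing the driver/MFW mailboxes that live in MCP shared
+ * memory; offsets are relative to this PF's mailbox base address.
+ */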
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
+	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+	       _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
+	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
+		     offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
+	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+		     offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+		  DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+		return false;
+	return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_PORT);
+	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+						   MFW_PORT(p_hwfn));
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "port_addr = 0x%x, port_id 0x%02x\n",
+		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt)
+{
+	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+	u32 tmp, i;
+
+	if (!p_hwfn->mcp_info->public_base)
+		return;
+
+	for (i = 0; i < length; i++) {
+		tmp = qed_rd(p_hwfn, p_ptt,
+			     p_hwfn->mcp_info->mfw_mb_addr +
+			     (i << 2) + sizeof(u32));
+
+		/* The MB data is actually BE; Need to force it to cpu */
+		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+			be32_to_cpu((__force __be32)tmp);
+	}
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+	if (p_hwfn->mcp_info) {
+		kfree(p_hwfn->mcp_info->mfw_mb_cur);
+		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+	}
+	kfree(p_hwfn->mcp_info);
+
+	return 0;
+}
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+	u32 drv_mb_offsize, mfw_mb_offsize;
+	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
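+	/* Each shmem "offsize" word packs an offset/size pair; the
+	 * SECTION_* macros decode it into an absolute section address.
+	 */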
+	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+	if (!p_info->public_base)
+		return 0;
+
+	p_info->public_base |= GRCBASE_MCP;
+
+	/* Calculate the driver and MFW mailbox address */
+	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_DRV_MB));
+	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+	/* Set the MFW MB address */
+	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_info->public_base,
+						     PUBLIC_MFW_MB));
+	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+	p_info->mfw_mb_length =	(u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
+
+	/* Get the current driver mailbox sequence before sending
+	 * the first command
+	 */
+	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+			     DRV_MSG_SEQ_NUMBER_MASK;
+
+	/* Get current FW pulse sequence */
+	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+				DRV_PULSE_SEQ_MASK;
+
+	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+	return 0;
+}
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_info *p_info;
+	u32 size;
+
+	/* Allocate mcp_info structure */
+	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
+	if (!p_hwfn->mcp_info)
+		goto err;
+	p_info = p_hwfn->mcp_info;
+
+	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+		/* Do not free mcp_info here, since a zero public_base
+		 * indicates that the MCP is not initialized
+		 */
+		return 0;
+	}
+
+	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+	p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
+	p_info->mfw_mb_shadow = kzalloc(size, GFP_ATOMIC);
+	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow ||
+	    !p_info->mfw_mb_addr)
+		goto err;
+
+	/* Initialize the MFW mutex */
+	mutex_init(&p_info->mutex);
+
+	return 0;
+
+err:
+	DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
+	qed_mcp_free(p_hwfn);
+	return -ENOMEM;
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt)
+{
+	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+	u8 delay = CHIP_MCP_RESP_ITER_US;
+	u32 org_mcp_reset_seq, cnt = 0;
+	int rc = 0;
+
+	/* Set drv command along with the updated sequence */
+	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
+		  (DRV_MSG_CODE_MCP_RESET | seq));
+
+	do {
+		/* Wait for MFW response */
+		udelay(delay);
+		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
+	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+					      MISCS_REG_GENERIC_POR_0)) &&
+		 (cnt++ < QED_MCP_RESET_RETRIES));
+
+	if (org_mcp_reset_seq !=
+	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "MCP was reset after %d usec\n", cnt * delay);
+	} else {
+		DP_ERR(p_hwfn, "Failed to reset MCP\n");
+		rc = -EAGAIN;
+	}
+
+	return rc;
+}
+
+static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt,
+			  u32 cmd,
+			  u32 param,
+			  u32 *o_mcp_resp,
+			  u32 *o_mcp_param)
+{
+	u8 delay = CHIP_MCP_RESP_ITER_US;
+	u32 seq, cnt = 1, actual_mb_seq;
+	int rc = 0;
+
+	/* Get actual driver mailbox sequence */
+	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+			DRV_MSG_SEQ_NUMBER_MASK;
+
+	/* Use MCP history register to check if MCP reset occurred between
+	 * init time and now.
+	 */
+	if (p_hwfn->mcp_info->mcp_hist !=
+	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
+		qed_load_mcp_offsets(p_hwfn, p_ptt);
+		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+	}
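+
+	/* The low bits of the mailbox header carry a sequence number that
+	 * the MFW echoes back in fw_mb_header, letting us match the
+	 * response below to this particular command.
+	 */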
+	seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+	/* Set drv param */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+	/* Set drv command along with the updated sequence */
+	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "wrote command (%x) to MFW MB param 0x%08x\n",
+		   (cmd | seq), param);
+
+	do {
+		/* Wait for MFW response */
+		udelay(delay);
+		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+		/* Give the MFW up to 5 seconds (500 * 1000 * 10 usec) */
+	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+		 (cnt++ < QED_DRV_MB_MAX_RETRIES));
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP,
+		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
+		   cnt * delay, *o_mcp_resp, seq);
+
+	/* Is this a reply to our command? */
+	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+		*o_mcp_resp &= FW_MSG_CODE_MASK;
+		/* Get the MCP param */
+		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+	} else {
+		/* FW BUG! */
+		DP_ERR(p_hwfn, "MFW failed to respond!\n");
+		*o_mcp_resp = 0;
+		rc = -EAGAIN;
+	}
+	return rc;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+		struct qed_ptt *p_ptt,
+		u32 cmd,
+		u32 param,
+		u32 *o_mcp_resp,
+		u32 *o_mcp_param)
+{
+	int rc = 0;
+
+	/* MCP not initialized */
+	if (!qed_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+		return -EBUSY;
+	}
+
+	/* Lock Mutex to ensure only single thread is
+	 * accessing the MCP at one time
+	 */
+	mutex_lock(&p_hwfn->mcp_info->mutex);
+	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+			    o_mcp_resp, o_mcp_param);
+	/* Release Mutex */
+	mutex_unlock(&p_hwfn->mcp_info->mutex);
+
+	return rc;
+}
+
+static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
+				struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_ptt)
+{
+	u32 i;
+
+	/* Copy version string to MCP */
+	for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
+		DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
+			  *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u32 *p_load_code)
+{
+	struct qed_dev *cdev = p_hwfn->cdev;
+	u32 param;
+	int rc;
+
+	if (!qed_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+		return -EBUSY;
+	}
+
+	/* Save driver's version to shmem */
+	qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+		   p_hwfn->mcp_info->drv_mb_seq,
+		   p_hwfn->mcp_info->drv_pulse_seq);
+
+	/* Load Request */
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+			 (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+			  cdev->drv_type),
+			 p_load_code, &param);
+
+	/* if mcp fails to respond we must abort */
+	if (rc) {
+		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
+
+	/* If the MFW refused the load request we must abort. This can
+	 * happen in the following cases:
+	 * - The other port is in diagnostic mode.
+	 * - A previously loaded function on the engine is not compliant
+	 *   with the requester.
+	 * - The MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+	 */
+	if (!(*p_load_code) ||
+	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       bool b_reset)
+{
+	struct qed_mcp_link_state *p_link;
+	u32 status = 0;
+
+	p_link = &p_hwfn->mcp_info->link_output;
+	memset(p_link, 0, sizeof(*p_link));
+	if (!b_reset) {
+		status = qed_rd(p_hwfn, p_ptt,
+				p_hwfn->mcp_info->port_addr +
+				offsetof(struct public_port, link_status));
+		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+			   status,
+			   (u32)(p_hwfn->mcp_info->port_addr +
+				 offsetof(struct public_port,
+					  link_status)));
+	} else {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Resetting link indications\n");
+		return;
+	}
+
+	p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+
+	p_link->full_duplex = true;
+	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+		p_link->speed = 100000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+		p_link->speed = 50000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+		p_link->speed = 40000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+		p_link->speed = 25000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+		p_link->speed = 20000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+		p_link->speed = 10000;
+		break;
+	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+		p_link->full_duplex = false;
+	/* Fall-through */
+	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+		p_link->speed = 1000;
+		break;
+	default:
+		p_link->speed = 0;
+	}
+
+	/* Correct speed according to bandwidth allocation */
+	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+		p_link->speed = p_link->speed *
+				p_hwfn->mcp_info->func_info.bandwidth_max /
+				100;
+		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+			       p_link->speed);
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Configured MAX bandwidth to be %08x Mb/sec\n",
+			   p_link->speed);
+	}
+
+	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+	p_link->an_complete = !!(status &
+				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+	p_link->parallel_detection = !!(status &
+					LINK_STATUS_PARALLEL_DETECTION_USED);
+	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_1G_FD : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_1G_HD : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_10G : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_20G : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_40G : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_50G : 0;
+	p_link->partner_adv_speed |=
+		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+		QED_LINK_PARTNER_SPEED_100G : 0;
+
+	p_link->partner_tx_flow_ctrl_en =
+		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+	p_link->partner_rx_flow_ctrl_en =
+		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+		break;
+	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+		break;
+	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+		break;
+	default:
+		p_link->partner_adv_pause = 0;
+	}
+
+	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+	qed_link_update(p_hwfn);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     bool b_up)
+{
+	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+	u32 param = 0, reply = 0, cmd;
+	struct pmm_phy_cfg phy_cfg;
+	int rc = 0;
+	u32 i;
+
+	if (!qed_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+		return -EBUSY;
+	}
+
+	/* Set the shmem configuration according to params */
+	memset(&phy_cfg, 0, sizeof(phy_cfg));
+	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+	if (!params->speed.autoneg)
+		phy_cfg.speed = params->speed.forced_speed;
+	phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+	phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+	phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+	phy_cfg.adv_speed = params->speed.advertised_speeds;
+	phy_cfg.loopback_mode = params->loopback_mode;
+
+	/* Write the requested configuration to shmem */
+	for (i = 0; i < sizeof(phy_cfg); i += 4)
+		qed_wr(p_hwfn, p_ptt,
+		       p_hwfn->mcp_info->drv_mb_addr +
+		       offsetof(struct public_drv_mb, union_data) + i,
+		       ((u32 *)&phy_cfg)[i >> 2]);
+
+	if (b_up) {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
+			   phy_cfg.speed,
+			   phy_cfg.pause,
+			   phy_cfg.adv_speed,
+			   phy_cfg.loopback_mode,
+			   phy_cfg.feature_config_flags);
+	} else {
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Resetting link\n");
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
+		   p_hwfn->mcp_info->drv_mb_seq,
+		   p_hwfn->mcp_info->drv_pulse_seq);
+
+	/* Load Request */
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);
+
+	/* if mcp fails to respond we must abort */
+	if (rc) {
+		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
+
+	/* Reset the link status if needed */
+	if (!b_up)
+		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+	return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_info *info = p_hwfn->mcp_info;
+	int rc = 0;
+	bool found = false;
+	u16 i;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+	/* Read Messages from MFW */
+	qed_mcp_read_mb(p_hwfn, p_ptt);
+
+	/* Compare current messages to old ones */
+	for (i = 0; i < info->mfw_mb_length; i++) {
+		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+			continue;
+
+		found = true;
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+		switch (i) {
+		case MFW_DRV_MSG_LINK_CHANGE:
+			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+			break;
+		default:
+			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+			rc = -EINVAL;
+		}
+	}
+
+	/* ACK everything */
+	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+		/* The MFW expects the answer in BE, so force the write in that format */
+		qed_wr(p_hwfn, p_ptt,
+		       info->mfw_mb_addr + sizeof(u32) +
+		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+		       sizeof(u32) + i * sizeof(u32),
+		       (__force u32)val);
+	}
+
+	if (!found) {
+		DP_NOTICE(p_hwfn,
+			  "Received an MFW message indication but no new message!\n");
+		rc = -EINVAL;
+	}
+
+	/* Copy the new mfw messages into the shadow */
+	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+	return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+			u32 *p_mfw_ver)
+{
+	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+	struct qed_ptt *p_ptt;
+	u32 global_offsize;
+
+	p_ptt = qed_ptt_acquire(p_hwfn);
+	if (!p_ptt)
+		return -EBUSY;
+
+	global_offsize = qed_rd(p_hwfn, p_ptt,
+				SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+						     public_base,
+						     PUBLIC_GLOBAL));
+	*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+			    SECTION_ADDR(global_offsize, 0) +
+			    offsetof(struct public_global, mfw_ver));
+
+	qed_ptt_release(p_hwfn, p_ptt);
+
+	return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_dev *cdev,
+			   u32 *p_media_type)
+{
+	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
+	struct qed_ptt  *p_ptt;
+
+	if (!qed_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+		return -EBUSY;
+	}
+
+	*p_media_type = MEDIA_UNSPECIFIED;
+
+	p_ptt = qed_ptt_acquire(p_hwfn);
+	if (!p_ptt)
+		return -EBUSY;
+
+	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+			       offsetof(struct public_port, media_type));
+
+	qed_ptt_release(p_hwfn, p_ptt);
+
+	return 0;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+				  struct qed_ptt *p_ptt,
+				  struct public_func *p_data,
+				  int pfid)
+{
+	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+					PUBLIC_FUNC);
+	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+	u32 i, size;
+
+	memset(p_data, 0, sizeof(*p_data));
+
+	size = min_t(u32, sizeof(*p_data),
+		     QED_SECTION_SIZE(mfw_path_offsize));
+	for (i = 0; i < size / sizeof(u32); i++)
+		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+					    func_addr + (i << 2));
+
+	return size;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+			struct public_func *p_info,
+			enum qed_pci_personality *p_proto)
+{
+	int rc = 0;
+
+	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+		*p_proto = QED_PCI_ETH;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt)
+{
+	struct qed_mcp_function_info *info;
+	struct public_func shmem_info;
+
+	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+			       MCP_PF_ID(p_hwfn));
+	info = &p_hwfn->mcp_info->func_info;
+
+	info->pause_on_host = (shmem_info.config &
+			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+				    &info->protocol)) {
+		DP_ERR(p_hwfn, "Unknown personality %08x\n",
+		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+		return -EINVAL;
+	}
+
+	if (p_hwfn->cdev->mf_mode != SF) {
+		info->bandwidth_min = (shmem_info.config &
+				       FUNC_MF_CFG_MIN_BW_MASK) >>
+				      FUNC_MF_CFG_MIN_BW_SHIFT;
+		if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
+			DP_INFO(p_hwfn,
+				"bandwidth minimum out of bounds [%02x]. Set to 1\n",
+				info->bandwidth_min);
+			info->bandwidth_min = 1;
+		}
+
+		info->bandwidth_max = (shmem_info.config &
+				       FUNC_MF_CFG_MAX_BW_MASK) >>
+				      FUNC_MF_CFG_MAX_BW_SHIFT;
+		if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
+			DP_INFO(p_hwfn,
+				"bandwidth maximum out of bounds [%02x]. Set to 100\n",
+				info->bandwidth_max);
+			info->bandwidth_max = 100;
+		}
+	}
+
+	if (shmem_info.mac_upper || shmem_info.mac_lower) {
+		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+		info->mac[1] = (u8)(shmem_info.mac_upper);
+		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+		info->mac[5] = (u8)(shmem_info.mac_lower);
+	} else {
+		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+	}
+
+	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
+	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
+		info->pause_on_host, info->protocol,
+		info->bandwidth_min, info->bandwidth_max,
+		info->mac[0], info->mac[1], info->mac[2],
+		info->mac[3], info->mac[4], info->mac[5],
+		info->wwn_port, info->wwn_node, info->ovlan);
+
+	return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn || !p_hwfn->mcp_info)
+		return NULL;
+	return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn || !p_hwfn->mcp_info)
+		return NULL;
+	return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn || !p_hwfn->mcp_info)
+		return NULL;
+	return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt)
+{
+	u32 resp = 0, param = 0;
+	int rc;
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt,
+			 DRV_MSG_CODE_NIG_DRAIN, 100,
+			 &resp, &param);
+
+	/* Wait for the drain to complete before returning */
+	msleep(120);
+
+	return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt,
+			   u32 *p_flash_size)
+{
+	u32 flash_size;
+
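+	/* NVM_CFG4 encodes log2 of the flash size in Mbit; shifting by
+	 * MCP_BYTES_PER_MBIT_SHIFT (17) converts that value to bytes.
+	 */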
+	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+	*p_flash_size = flash_size;
+
+	return 0;
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 struct qed_mcp_drv_version *p_ver)
+{
+	int rc = 0;
+	u32 param = 0, reply = 0, i;
+
+	if (!qed_mcp_is_init(p_hwfn)) {
+		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+		return -EBUSY;
+	}
+
+	DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
+		  p_ver->version);
+	/* Copy version string to shmem */
+	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
+		DRV_MB_WR(p_hwfn, p_ptt,
+			  union_data.drv_version.name[i * sizeof(u32)],
+			  *(u32 *)&p_ver->name[i * sizeof(u32)]);
+	}
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
+			 &param);
+	if (rc) {
+		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+		return rc;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 0000000..dbaae58
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,369 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "qed_hsi.h"
+
+struct qed_mcp_link_speed_params {
+	bool    autoneg;
+	u32     advertised_speeds;      /* bitmask of DRV_SPEED_CAPABILITY */
+	u32     forced_speed;	   /* In Mb/s */
+};
+
+struct qed_mcp_link_pause_params {
+	bool    autoneg;
+	bool    forced_rx;
+	bool    forced_tx;
+};
+
+struct qed_mcp_link_params {
+	struct qed_mcp_link_speed_params	speed;
+	struct qed_mcp_link_pause_params	pause;
+	u32				     loopback_mode;
+};
+
+struct qed_mcp_link_capabilities {
+	u32 speed_capabilities;
+};
+
+struct qed_mcp_link_state {
+	bool    link_up;
+
+	u32     speed; /* In Mb/s */
+	bool    full_duplex;
+
+	bool    an;
+	bool    an_complete;
+	bool    parallel_detection;
+	bool    pfc_enabled;
+
+#define QED_LINK_PARTNER_SPEED_1G_HD    BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD    BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G      BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G      BIT(3)
+#define QED_LINK_PARTNER_SPEED_40G      BIT(4)
+#define QED_LINK_PARTNER_SPEED_50G      BIT(5)
+#define QED_LINK_PARTNER_SPEED_100G     BIT(6)
+	u32     partner_adv_speed;
+
+	bool    partner_tx_flow_ctrl_en;
+	bool    partner_rx_flow_ctrl_en;
+
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define QED_LINK_PARTNER_BOTH_PAUSE (3)
+	u8      partner_adv_pause;
+
+	bool    sfp_tx_fault;
+};
+
+struct qed_mcp_function_info {
+	u8				pause_on_host;
+
+	enum qed_pci_personality	protocol;
+
+	u8				bandwidth_min;
+	u8				bandwidth_max;
+
+	u8				mac[ETH_ALEN];
+
+	u64				wwn_port;
+	u64				wwn_node;
+
+#define QED_MCP_VLAN_UNSET              (0xffff)
+	u16				ovlan;
+};
+
+struct qed_mcp_nvm_common {
+	u32	offset;
+	u32	param;
+	u32	resp;
+	u32	cmd;
+};
+
+struct qed_mcp_drv_version {
+	u32	version;
+	u8	name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct qed_mcp_link_capabilities
+	*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return int
+ */
+int qed_mcp_set_link(struct qed_hwfn   *p_hwfn,
+		     struct qed_ptt     *p_ptt,
+		     bool               b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param cdev       - qed dev pointer
+ * @param mfw_ver    - mfw version value
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
+			u32 *mfw_ver);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param cdev      - qed dev pointer
+ * @param media_type - media type value
+ *
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_dev      *cdev,
+			   u32                  *media_type);
+
+/**
+ * @brief General function for sending commands to the MCP
+ *        mailbox. It acquires a mutex lock for the entire
+ *        operation, from sending the request until the MCP
+ *        response is received. The MCP response is polled for
+ *        up to 5 seconds, every 5ms.
+ *
+ * @param p_hwfn     - hw function
+ * @param p_ptt      - PTT required for register access
+ * @param cmd        - command to be sent to the MCP.
+ * @param param      - Optional param
+ * @param o_mcp_resp - The MCP response code (excluding sequence).
+ * @param o_mcp_param - Optional parameter provided by the MCP
+ *                      response
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+		struct qed_ptt *p_ptt,
+		u32 cmd,
+		u32 param,
+		u32 *o_mcp_resp,
+		u32 *o_mcp_param);
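+
+/* A minimal usage sketch (hypothetical caller, sleepable context only):
+ *
+ *	u32 resp = 0, param = 0;
+ *	int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
+ *			     &resp, &param);
+ *
+ * The call polls the mailbox and may sleep, so it must not be used in
+ * atomic context; see qed_mcp_drain() for a real caller.
+ */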
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ *          (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size  - flash size in bytes to be filled.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn     *p_hwfn,
+			   struct qed_ptt       *p_ptt,
+			   u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+			 struct qed_ptt *p_ptt,
+			 struct qed_mcp_drv_version *p_ver);
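+
+/* Illustrative call (field values hypothetical):
+ *
+ *	struct qed_mcp_drv_version drv_version = { .version = 0x08040000 };
+ *
+ *	memcpy(drv_version.name, "qede", 5);
+ *	rc = qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_version);
+ */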
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * same pf_num may be used by two different hwfn
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem,
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ?	       \
+					    ((rel_pfid) |		       \
+					     ((p_hwfn)->abs_pf_id & 1) << 3) : \
+					    rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
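+
+/* Example: on a BB device with rel_pfid 2 and an odd abs_pf_id, the
+ * macro yields 2 | (1 << 3) = 0xa; on other devices it is simply the
+ * relative pfid.
+ */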
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %	\
+				 ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+struct qed_mcp_info {
+	struct mutex				mutex; /* MCP access lock */
+	u32					public_base;
+	u32					drv_mb_addr;
+	u32					mfw_mb_addr;
+	u32					port_addr;
+	u16					drv_mb_seq;
+	u16					drv_pulse_seq;
+	struct qed_mcp_link_params		link_input;
+	struct qed_mcp_link_state		link_output;
+	struct qed_mcp_link_capabilities	link_capabilities;
+	struct qed_mcp_function_info		func_info;
+	u8					*mfw_mb_cur;
+	u8					*mfw_mb_shadow;
+	u16					mfw_mb_length;
+	u16					mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt);
+
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int
+ */
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW and, in case the operation
+ *        succeeds, returns whether this PF is the first on the
+ *        chip/engine/port or function. This function should be
+ *        called when the driver is ready to accept MFW events after
+ *        Storm initializations are done.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param p_load_code  - The MCP response param containing one
+ *      of the following:
+ *      FW_MSG_CODE_DRV_LOAD_ENGINE
+ *      FW_MSG_CODE_DRV_LOAD_PORT
+ *      FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return int -
+ *      0 - Operation was successful.
+ *      -EBUSY - Operation failed
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt,
+		     u32 *p_load_code);
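+
+/* A caller would typically branch on the returned load code (sketch):
+ *
+ *	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_code);
+ *	switch (load_code) {
+ *	case FW_MSG_CODE_DRV_LOAD_ENGINE:	first PF on the engine
+ *	case FW_MSG_CODE_DRV_LOAD_PORT:		first PF on the port
+ *	case FW_MSG_CODE_DRV_LOAD_FUNCTION:	function-level init only
+ *	}
+ */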
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+		     struct qed_ptt *p_ptt);
+
+/**
+ * @brief - calls during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+		  struct qed_ptt *p_ptt);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 0000000..7a5ce59
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,366 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+	0
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE		( \
+		0xfff << 0)
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+	12
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE		( \
+		0xfff << 12)
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+	24
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB			( \
+		0xff << 24)
+
+#define  XSDM_REG_OPERATION_GEN \
+	0xf80408UL
+#define  NIG_REG_RX_BRB_OUT_EN \
+	0x500e18UL
+#define  NIG_REG_STORM_OUT_EN \
+	0x500e08UL
+#define  PSWRQ2_REG_L2P_VALIDATE_VFID \
+	0x240c50UL
+#define  PGLUE_B_REG_USE_CLIENTID_IN_TAG	\
+	0x2aae04UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER	\
+	0x2aa16cUL
+#define  BAR0_MAP_REG_MSDM_RAM \
+	0x1d00000UL
+#define  BAR0_MAP_REG_USDM_RAM \
+	0x1d80000UL
+#define  BAR0_MAP_REG_PSDM_RAM \
+	0x1f00000UL
+#define  BAR0_MAP_REG_TSDM_RAM \
+	0x1c80000UL
+#define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+	0x5011f4UL
+#define  PRS_REG_SEARCH_TCP \
+	0x1f0400UL
+#define  PRS_REG_SEARCH_UDP \
+	0x1f0404UL
+#define  PRS_REG_SEARCH_FCOE \
+	0x1f0408UL
+#define  PRS_REG_SEARCH_ROCE \
+	0x1f040cUL
+#define  PRS_REG_SEARCH_OPENFLOW	\
+	0x1f0434UL
+#define  TM_REG_PF_ENABLE_CONN \
+	0x2c043cUL
+#define  TM_REG_PF_ENABLE_TASK \
+	0x2c0444UL
+#define  TM_REG_PF_SCAN_ACTIVE_CONN \
+	0x2c04fcUL
+#define  TM_REG_PF_SCAN_ACTIVE_TASK \
+	0x2c0500UL
+#define  IGU_REG_LEADING_EDGE_LATCH \
+	0x18082cUL
+#define  IGU_REG_TRAILING_EDGE_LATCH \
+	0x180830UL
+#define  QM_REG_USG_CNT_PF_TX \
+	0x2f2eacUL
+#define  QM_REG_USG_CNT_PF_OTHER	\
+	0x2f2eb0UL
+#define  DORQ_REG_PF_DB_ENABLE \
+	0x100508UL
+#define  QM_REG_PF_EN \
+	0x2f2ea4UL
+#define  TCFC_REG_STRONG_ENABLE_PF \
+	0x2d0708UL
+#define  CCFC_REG_STRONG_ENABLE_PF \
+	0x2e0708UL
+#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+	0x2aa404UL
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+	0x2aa408UL
+#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+	0x2aa40cUL
+#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+	0x2aa410UL
+#define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+	0x2aa138UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+	0x2aa174UL
+#define  MISC_REG_GEN_PURP_CR0 \
+	0x008c80UL
+#define  MCP_REG_SCRATCH	\
+	0xe20000UL
+#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
+	0x218200UL
+#define  MISCS_REG_CHIP_NUM \
+	0x00976cUL
+#define  MISCS_REG_CHIP_REV \
+	0x009770UL
+#define  MISCS_REG_CMT_ENABLED_FOR_PAIR \
+	0x00971cUL
+#define  MISCS_REG_CHIP_TEST_REG	\
+	0x009778UL
+#define  MISCS_REG_CHIP_METAL \
+	0x009774UL
+#define  BRB_REG_HEADER_SIZE \
+	0x340804UL
+#define  BTB_REG_HEADER_SIZE \
+	0xdb0804UL
+#define  CAU_REG_LONG_TIMEOUT_THRESHOLD \
+	0x1c0708UL
+#define  CCFC_REG_ACTIVITY_COUNTER \
+	0x2e8800UL
+#define  CDU_REG_CID_ADDR_PARAMS	\
+	0x580900UL
+#define  DBG_REG_CLIENT_ENABLE \
+	0x010004UL
+#define  DMAE_REG_INIT \
+	0x00c000UL
+#define  DORQ_REG_IFEN \
+	0x100040UL
+#define  GRC_REG_TIMEOUT_EN \
+	0x050404UL
+#define  IGU_REG_BLOCK_CONFIGURATION \
+	0x180040UL
+#define  MCM_REG_INIT \
+	0x1200000UL
+#define  MCP2_REG_DBG_DWORD_ENABLE \
+	0x052404UL
+#define  MISC_REG_PORT_MODE \
+	0x008c00UL
+#define  MISCS_REG_CLK_100G_MODE	\
+	0x009070UL
+#define  MSDM_REG_ENABLE_IN1 \
+	0xfc0004UL
+#define  MSEM_REG_ENABLE_IN \
+	0x1800004UL
+#define  NIG_REG_CM_HDR \
+	0x500840UL
+#define  NCSI_REG_CONFIG	\
+	0x040200UL
+#define  PBF_REG_INIT \
+	0xd80000UL
+#define  PTU_REG_ATC_INIT_ARRAY \
+	0x560000UL
+#define  PCM_REG_INIT \
+	0x1100000UL
+#define  PGLUE_B_REG_ADMIN_PER_PF_REGION	\
+	0x2a9000UL
+#define  PRM_REG_DISABLE_PRM \
+	0x230000UL
+#define  PRS_REG_SOFT_RST \
+	0x1f0000UL
+#define  PSDM_REG_ENABLE_IN1 \
+	0xfa0004UL
+#define  PSEM_REG_ENABLE_IN \
+	0x1600004UL
+#define  PSWRQ_REG_DBG_SELECT \
+	0x280020UL
+#define  PSWRQ2_REG_CDUT_P_SIZE \
+	0x24000cUL
+#define  PSWHST_REG_DISCARD_INTERNAL_WRITES \
+	0x2a0040UL
+#define  PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+	0x29e050UL
+#define  PSWRD_REG_DBG_SELECT \
+	0x29c040UL
+#define  PSWRD2_REG_CONF11 \
+	0x29d064UL
+#define  PSWWR_REG_USDM_FULL_TH \
+	0x29a040UL
+#define  PSWWR2_REG_CDU_FULL_TH2	\
+	0x29b040UL
+#define  QM_REG_MAXPQSIZE_0 \
+	0x2f0434UL
+#define  RSS_REG_RSS_INIT_EN \
+	0x238804UL
+#define  RDIF_REG_STOP_ON_ERROR \
+	0x300040UL
+#define  SRC_REG_SOFT_RST \
+	0x23874cUL
+#define  TCFC_REG_ACTIVITY_COUNTER \
+	0x2d8800UL
+#define  TCM_REG_INIT \
+	0x1180000UL
+#define  TM_REG_PXP_READ_DATA_FIFO_INIT \
+	0x2c0014UL
+#define  TSDM_REG_ENABLE_IN1 \
+	0xfb0004UL
+#define  TSEM_REG_ENABLE_IN \
+	0x1700004UL
+#define  TDIF_REG_STOP_ON_ERROR \
+	0x310040UL
+#define  UCM_REG_INIT \
+	0x1280000UL
+#define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+	0x051004UL
+#define  USDM_REG_ENABLE_IN1 \
+	0xfd0004UL
+#define  USEM_REG_ENABLE_IN \
+	0x1900004UL
+#define  XCM_REG_INIT \
+	0x1000000UL
+#define  XSDM_REG_ENABLE_IN1 \
+	0xf80004UL
+#define  XSEM_REG_ENABLE_IN \
+	0x1400004UL
+#define  YCM_REG_INIT \
+	0x1080000UL
+#define  YSDM_REG_ENABLE_IN1 \
+	0xf90004UL
+#define  YSEM_REG_ENABLE_IN \
+	0x1500004UL
+#define  XYLD_REG_SCBD_STRICT_PRIO \
+	0x4c0000UL
+#define  TMLD_REG_SCBD_STRICT_PRIO \
+	0x4d0000UL
+#define  MULD_REG_SCBD_STRICT_PRIO \
+	0x4e0000UL
+#define  YULD_REG_SCBD_STRICT_PRIO \
+	0x4c8000UL
+#define  MISC_REG_SHARED_MEM_ADDR \
+	0x008c20UL
+#define  DMAE_REG_GO_C0 \
+	0x00c048UL
+#define  DMAE_REG_GO_C1 \
+	0x00c04cUL
+#define  DMAE_REG_GO_C2 \
+	0x00c050UL
+#define  DMAE_REG_GO_C3 \
+	0x00c054UL
+#define  DMAE_REG_GO_C4 \
+	0x00c058UL
+#define  DMAE_REG_GO_C5 \
+	0x00c05cUL
+#define  DMAE_REG_GO_C6 \
+	0x00c060UL
+#define  DMAE_REG_GO_C7 \
+	0x00c064UL
+#define  DMAE_REG_GO_C8 \
+	0x00c068UL
+#define  DMAE_REG_GO_C9 \
+	0x00c06cUL
+#define  DMAE_REG_GO_C10	\
+	0x00c070UL
+#define  DMAE_REG_GO_C11	\
+	0x00c074UL
+#define  DMAE_REG_GO_C12	\
+	0x00c078UL
+#define  DMAE_REG_GO_C13	\
+	0x00c07cUL
+#define  DMAE_REG_GO_C14	\
+	0x00c080UL
+#define  DMAE_REG_GO_C15	\
+	0x00c084UL
+#define  DMAE_REG_GO_C16	\
+	0x00c088UL
+#define  DMAE_REG_GO_C17	\
+	0x00c08cUL
+#define  DMAE_REG_GO_C18	\
+	0x00c090UL
+#define  DMAE_REG_GO_C19	\
+	0x00c094UL
+#define  DMAE_REG_GO_C20	\
+	0x00c098UL
+#define  DMAE_REG_GO_C21	\
+	0x00c09cUL
+#define  DMAE_REG_GO_C22	\
+	0x00c0a0UL
+#define  DMAE_REG_GO_C23	\
+	0x00c0a4UL
+#define  DMAE_REG_GO_C24	\
+	0x00c0a8UL
+#define  DMAE_REG_GO_C25	\
+	0x00c0acUL
+#define  DMAE_REG_GO_C26	\
+	0x00c0b0UL
+#define  DMAE_REG_GO_C27	\
+	0x00c0b4UL
+#define  DMAE_REG_GO_C28	\
+	0x00c0b8UL
+#define  DMAE_REG_GO_C29	\
+	0x00c0bcUL
+#define  DMAE_REG_GO_C30	\
+	0x00c0c0UL
+#define  DMAE_REG_GO_C31	\
+	0x00c0c4UL
+#define  DMAE_REG_CMD_MEM \
+	0x00c800UL
+#define  QM_REG_MAXPQSIZETXSEL_0	\
+	0x2f0440UL
+#define  QM_REG_SDMCMDREADY \
+	0x2f1e10UL
+#define  QM_REG_SDMCMDADDR \
+	0x2f1e04UL
+#define  QM_REG_SDMCMDDATALSB \
+	0x2f1e08UL
+#define  QM_REG_SDMCMDDATAMSB \
+	0x2f1e0cUL
+#define  QM_REG_SDMCMDGO	\
+	0x2f1e14UL
+#define  QM_REG_RLPFCRD \
+	0x2f4d80UL
+#define  QM_REG_RLPFINCVAL \
+	0x2f4c80UL
+#define  QM_REG_RLGLBLCRD \
+	0x2f4400UL
+#define  QM_REG_RLGLBLINCVAL \
+	0x2f3400UL
+#define  IGU_REG_ATTENTION_ENABLE \
+	0x18083cUL
+#define  IGU_REG_ATTN_MSG_ADDR_L	\
+	0x180820UL
+#define  IGU_REG_ATTN_MSG_ADDR_H	\
+	0x180824UL
+#define  MISC_REG_AEU_GENERAL_ATTN_0 \
+	0x008400UL
+#define  CAU_REG_SB_ADDR_MEMORY \
+	0x1c8000UL
+#define  CAU_REG_SB_VAR_MEMORY \
+	0x1c6000UL
+#define  CAU_REG_PI_MEMORY \
+	0x1d0000UL
+#define  IGU_REG_PF_CONFIGURATION \
+	0x180800UL
+#define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+	0x00849cUL
+#define  MISC_REG_AEU_MASK_ATTN_IGU \
+	0x008494UL
+#define  IGU_REG_CLEANUP_STATUS_0 \
+	0x180980UL
+#define  IGU_REG_CLEANUP_STATUS_1 \
+	0x180a00UL
+#define  IGU_REG_CLEANUP_STATUS_2 \
+	0x180a80UL
+#define  IGU_REG_CLEANUP_STATUS_3 \
+	0x180b00UL
+#define  IGU_REG_CLEANUP_STATUS_4 \
+	0x180b80UL
+#define  IGU_REG_COMMAND_REG_32LSB_DATA \
+	0x180840UL
+#define  IGU_REG_COMMAND_REG_CTRL \
+	0x180848UL
+#define  IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN	( \
+		0x1 << 1)
+#define  IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN	( \
+		0x1 << 0)
+#define  IGU_REG_MAPPING_MEMORY \
+	0x184000UL
+#define  MISCS_REG_GENERIC_POR_0	\
+	0x0096d4UL
+#define  MCP_REG_NVM_CFG4 \
+	0xe0642cUL
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE	( \
+		0x7 << 0)
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+	0
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 0000000..31a1f1e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,360 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+	QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
+	QED_SPQ_MODE_CB,        /* Client supplies a callback */
+	QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+	void	(*function)(struct qed_hwfn *,
+			    void *,
+			    union event_ring_data *,
+			    u8 fw_return_code);
+	void	*cookie;
+};
+
+/**
+ * @brief qed_eth_cqe_completion - handles the completion of a
+ *        ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return int
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+			   struct eth_slow_path_rx_cqe *cqe);
+
+/**
+ *  @file
+ *
+ *  QED Slow-hwfn queue interface
+ */
+
+union ramrod_data {
+	struct pf_start_ramrod_data pf_start;
+	struct rx_queue_start_ramrod_data rx_queue_start;
+	struct rx_queue_update_ramrod_data rx_queue_update;
+	struct rx_queue_stop_ramrod_data rx_queue_stop;
+	struct tx_queue_start_ramrod_data tx_queue_start;
+	struct tx_queue_stop_ramrod_data tx_queue_stop;
+	struct vport_start_ramrod_data vport_start;
+	struct vport_stop_ramrod_data vport_stop;
+	struct vport_update_ramrod_data vport_update;
+	struct vport_filter_update_ramrod_data vport_filter_update;
+};
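+
+/* Each SPQ entry embeds this union (see struct qed_spq_entry below), so
+ * every entry reserves room for the largest ramrod payload regardless of
+ * which command it carries.
+ */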
+
+#define EQ_MAX_CREDIT   0xffffffff
+
+enum spq_priority {
+	QED_SPQ_PRIORITY_NORMAL,
+	QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+	struct qed_spq_comp_cb	cb;
+	u64			*done_addr;
+};
+
+struct qed_spq_comp_done {
+	u64	done;
+	u8	fw_return_code;
+};
+
+struct qed_spq_entry {
+	struct list_head		list;
+
+	u8				flags;
+
+	/* HSI slow path element */
+	struct slow_path_element	elem;
+
+	union ramrod_data		ramrod;
+
+	enum spq_priority		priority;
+
+	/* pending queue for this entry */
+	struct list_head		*queue;
+
+	enum spq_mode			comp_mode;
+	struct qed_spq_comp_cb		comp_cb;
+	struct qed_spq_comp_done	comp_done; /* SPQ_MODE_EBLOCK */
+};
+
+struct qed_eq {
+	struct qed_chain	chain;
+	u8			eq_sb_index;    /* index within the SB */
+	__le16			*p_fw_cons;     /* ptr to index value */
+};
+
+struct qed_consq {
+	struct qed_chain chain;
+};
+
+struct qed_spq {
+	spinlock_t		lock; /* SPQ lock */
+
+	struct list_head	unlimited_pending;
+	struct list_head	pending;
+	struct list_head	completion_pending;
+	struct list_head	free_pool;
+
+	struct qed_chain	chain;
+
+	/* allocated dma-able memory for spq entries (+ramrod data) */
+	dma_addr_t		p_phys;
+	struct qed_spq_entry	*p_virt;
+
+	/* Used as index for completions (returns on EQ by FW) */
+	u16			echo_idx;
+
+	/* Statistics */
+	u32			unlimited_pending_count;
+	u32			normal_count;
+	u32			high_count;
+	u32			comp_sent_count;
+	u32			comp_count;
+
+	u32			cid;
+};
+
+/**
+ * @brief qed_spq_post - Posts a Slow hwfn request to FW, or, lacking
+ *        room, queues it on the pending list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ *
+ * @return int
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+		 struct qed_spq_entry *p_ent,
+		 u8 *fw_return_code);
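+
+/* For a complete post flow see qed_sp_pf_start() in qed_sp_commands.c:
+ * an entry is obtained via qed_sp_init_request(), its ramrod data is
+ * filled in, and the entry is then handed to qed_spq_post().
+ */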
+
+/**
+ * @brief qed_spq_alloc - Allocates & initializes the SPQ.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_free - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_spq_get_entry - Obtain an entry from the spq
+ *        free pool list.
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return int
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+		  struct qed_spq_entry **pp_ent);
+
+/**
+ * @brief qed_spq_return_entry - Return an entry to the spq
+ *        free pool list.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+			  struct qed_spq_entry *p_ent);
+
+/**
+ * @brief qed_eq_alloc - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+			    u16 num_elem);
+
+/**
+ * @brief qed_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+		  struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_free - Deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+		 struct qed_eq *p_eq);
+
+/**
+ * @brief qed_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+			u16 prod);
+
+/**
+ * @brief qed_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return int
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+		      void *cookie);
+
+/**
+ * @brief qed_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return int
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+		       __le16 echo,
+		       u8 fw_return_code,
+		       union event_ring_data *p_data);
+
+/**
+ * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_alloc - Allocates & initializes an ConsQ
+ *        struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct qed_eq* - a newly allocated structure; NULL upon error.
+ */
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_consq_setup - Reset the ConsQ to its start
+ *        state.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+		     struct qed_consq *p_consq);
+
+/**
+ * @brief qed_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_consq
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+		    struct qed_consq *p_consq);
+
+/**
+ * @file
+ *
+ * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
+ */
+
+#define QED_SP_EQ_COMPLETION  0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_request_params {
+	size_t			ramrod_data_size;
+	enum spq_mode		comp_mode;
+	struct qed_spq_comp_cb *p_comp_data;
+};
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+			struct qed_spq_entry **pp_ent,
+			u32 cid,
+			u16 opaque_fid,
+			u8 cmd,
+			u8 protocol,
+			struct qed_sp_init_request_params *p_params);
+
+/**
+ * @brief qed_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param mode
+ *
+ * @return int
+ */
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    enum mf_mode mode);
+
+/**
+ * @brief qed_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PF's event ring. This ramrod
+ * also deletes the context for the Slow hwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 0000000..6f78791
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,170 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+			struct qed_spq_entry **pp_ent,
+			u32 cid,
+			u16 opaque_fid,
+			u8 cmd,
+			u8 protocol,
+			struct qed_sp_init_request_params *p_params)
+{
+	int rc = -EINVAL;
+	struct qed_spq_entry *p_ent = NULL;
+	u32 opaque_cid = opaque_fid << 16 | cid;
+
+	if (!pp_ent)
+		return -ENOMEM;
+
+	rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+	if (rc != 0)
+		return rc;
+
+	p_ent = *pp_ent;
+
+	p_ent->elem.hdr.cid		= cpu_to_le32(opaque_cid);
+	p_ent->elem.hdr.cmd_id		= cmd;
+	p_ent->elem.hdr.protocol_id	= protocol;
+
+	p_ent->priority		= QED_SPQ_PRIORITY_NORMAL;
+	p_ent->comp_mode	= p_params->comp_mode;
+	p_ent->comp_done.done	= 0;
+
+	switch (p_ent->comp_mode) {
+	case QED_SPQ_MODE_EBLOCK:
+		p_ent->comp_cb.cookie = &p_ent->comp_done;
+		break;
+
+	case QED_SPQ_MODE_BLOCK:
+		if (!p_params->p_comp_data)
+			return -EINVAL;
+
+		p_ent->comp_cb.cookie = p_params->p_comp_data->cookie;
+		break;
+
+	case QED_SPQ_MODE_CB:
+		if (!p_params->p_comp_data)
+			p_ent->comp_cb.function = NULL;
+		else
+			p_ent->comp_cb = *p_params->p_comp_data;
+		break;
+
+	default:
+		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+			  p_ent->comp_mode);
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+		   opaque_cid, cmd, protocol,
+		   (unsigned long)&p_ent->ramrod,
+		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+			   "MODE_CB"));
+	if (p_params->ramrod_data_size)
+		memset(&p_ent->ramrod, 0, p_params->ramrod_data_size);
+
+	return 0;
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    enum mf_mode mode)
+{
+	struct qed_sp_init_request_params params;
+	struct pf_start_ramrod_data *p_ramrod = NULL;
+	u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+	struct qed_spq_entry *p_ent = NULL;
+	int rc = -EINVAL;
+
+	/* update initial eq producer */
+	qed_eq_prod_update(p_hwfn,
+			   qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+	memset(&params, 0, sizeof(params));
+	params.ramrod_data_size = sizeof(*p_ramrod);
+	params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn,
+				 &p_ent,
+				 qed_spq_get_cid(p_hwfn),
+				 p_hwfn->hw_info.opaque_fid,
+				 COMMON_RAMROD_PF_START,
+				 PROTOCOLID_COMMON,
+				 &params);
+	if (rc)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.pf_start;
+
+	p_ramrod->event_ring_sb_id	= cpu_to_le16(sb);
+	p_ramrod->event_ring_sb_index	= sb_index;
+	p_ramrod->path_id		= QED_PATH_ID(p_hwfn);
+	p_ramrod->dont_log_ramrods	= 0;
+	p_ramrod->log_type_mask		= cpu_to_le16(0xf);
+	p_ramrod->mf_mode = mode;
+	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+	/* Place EQ address in RAMROD */
+	p_ramrod->event_ring_pbl_addr.hi =
+			DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+	p_ramrod->event_ring_pbl_addr.lo =
+			DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table);
+	p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
+
+	p_ramrod->consolid_q_pbl_addr.hi =
+			DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+	p_ramrod->consolid_q_pbl_addr.lo =
+			DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+	p_hwfn->hw_info.personality = PERSONALITY_ETH;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+		   "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n",
+		   sb, sb_index,
+		   (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf",
+		   p_ramrod->outer_tag);
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+	struct qed_sp_init_request_params params;
+	struct qed_spq_entry *p_ent = NULL;
+	int rc = -EINVAL;
+
+	memset(&params, 0, sizeof(params));
+	params.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+	rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn),
+				 p_hwfn->hw_info.opaque_fid,
+				 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+				 &params);
+	if (rc)
+		return rc;
+
+	return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 0000000..7c0b845
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,860 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+
+/***************************************************************************
+* Structures & Definitions
+***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
+#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
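+
+/* Note: each blocking iteration sleeps 5-10ms (usleep_range below), so
+ * SPQ_BLOCK_SLEEP_LENGTH iterations amount to roughly 5-10 seconds of
+ * waiting before the MCP drain fallback is attempted.
+ */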
+
+/***************************************************************************
+* Blocking Imp. (BLOCK/EBLOCK mode)
+***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+				void *cookie,
+				union event_ring_data *data,
+				u8 fw_return_code)
+{
+	struct qed_spq_comp_done *comp_done;
+
+	comp_done = (struct qed_spq_comp_done *)cookie;
+
+	comp_done->done			= 0x1;
+	comp_done->fw_return_code	= fw_return_code;
+
+	/* make update visible to waiting thread */
+	smp_wmb();
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+			 struct qed_spq_entry *p_ent,
+			 u8 *p_fw_ret)
+{
+	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+	struct qed_spq_comp_done *comp_done;
+	int rc;
+
+	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+	while (sleep_count) {
+		/* validate we receive completion update */
+		smp_rmb();
+		if (comp_done->done == 1) {
+			if (p_fw_ret)
+				*p_fw_ret = comp_done->fw_return_code;
+			return 0;
+		}
+		usleep_range(5000, 10000);
+		sleep_count--;
+	}
+
+	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+	if (rc != 0)
+		DP_NOTICE(p_hwfn, "MCP drain failed\n");
+
+	/* Retry after drain */
+	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+	while (sleep_count) {
+		/* validate we receive completion update */
+		smp_rmb();
+		if (comp_done->done == 1) {
+			if (p_fw_ret)
+				*p_fw_ret = comp_done->fw_return_code;
+			return 0;
+		}
+		usleep_range(5000, 10000);
+		sleep_count--;
+	}
+
+	if (comp_done->done == 1) {
+		if (p_fw_ret)
+			*p_fw_ret = comp_done->fw_return_code;
+		return 0;
+	}
+
+	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");
+
+	return -EBUSY;
+}
+
+/***************************************************************************
+* SPQ entries inner API
+***************************************************************************/
+static int
+qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+		   struct qed_spq_entry *p_ent)
+{
+	p_ent->elem.hdr.echo = 0;
+	p_hwfn->p_spq->echo_idx++;
+	p_ent->flags = 0;
+
+	switch (p_ent->comp_mode) {
+	case QED_SPQ_MODE_EBLOCK:
+	case QED_SPQ_MODE_BLOCK:
+		p_ent->comp_cb.function = qed_spq_blocking_cb;
+		break;
+	case QED_SPQ_MODE_CB:
+		break;
+	default:
+		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+			  p_ent->comp_mode);
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
+		   p_ent->elem.hdr.cid,
+		   p_ent->elem.hdr.cmd_id,
+		   p_ent->elem.hdr.protocol_id,
+		   p_ent->elem.data_ptr.hi,
+		   p_ent->elem.data_ptr.lo,
+		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+			   "MODE_CB"));
+
+	return 0;
+}
+
+/***************************************************************************
+* HSI access
+***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+				  struct qed_spq *p_spq)
+{
+	u16				pq;
+	struct qed_cxt_info		cxt_info;
+	struct core_conn_context	*p_cxt;
+	union qed_qm_pq_params		pq_params;
+	int				rc;
+
+	cxt_info.iid = p_spq->cid;
+
+	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+	if (rc < 0) {
+		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+			  p_spq->cid);
+		return;
+	}
+
+	p_cxt = cxt_info.p_cxt;
+
+	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+	/* QM physical queue */
+	memset(&pq_params, 0, sizeof(pq_params));
+	pq_params.core.tc = LB_TC;
+	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+
+	p_cxt->xstorm_st_context.spq_base_lo =
+		DMA_LO_LE(p_spq->chain.p_phys_addr);
+	p_cxt->xstorm_st_context.spq_base_hi =
+		DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+	p_cxt->xstorm_st_context.consolid_base_addr.lo =
+		DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+	p_cxt->xstorm_st_context.consolid_base_addr.hi =
+		DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+			   struct qed_spq *p_spq,
+			   struct qed_spq_entry *p_ent)
+{
+	struct qed_chain		*p_chain = &p_hwfn->p_spq->chain;
+	struct slow_path_element	*elem;
+	struct core_db_data		db;
+
+	elem = qed_chain_produce(p_chain);
+	if (!elem) {
+		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+		return -EINVAL;
+	}
+
+	*elem = p_ent->elem; /* struct assignment */
+
+	/* send a doorbell on the slow hwfn session */
+	memset(&db, 0, sizeof(db));
+	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+		  DQ_XCM_CORE_SPQ_PROD_CMD);
+	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+	/* validate the producer is up to date */
+	rmb();
+
+	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+	/* do not reorder */
+	barrier();
+
+	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+
+	/* make sure the doorbell is rung */
+	mmiowb();
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
+		   p_spq->cid, db.params, db.agg_flags,
+		   qed_chain_get_prod_idx(p_chain));
+
+	return 0;
+}
+
+/***************************************************************************
+* Asynchronous events
+***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+			   struct event_ring_entry *p_eqe)
+{
+	DP_NOTICE(p_hwfn,
+		  "Unknown Async completion for protocol: %d\n",
+		   p_eqe->protocol_id);
+	return -EINVAL;
+}
+
+/***************************************************************************
+* EQ API
+***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+			u16 prod)
+{
+	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+	REG_WR16(p_hwfn, addr, prod);
+
+	/* keep prod updates ordered */
+	mmiowb();
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+		      void *cookie)
+
+{
+	struct qed_eq *p_eq = cookie;
+	struct qed_chain *p_chain = &p_eq->chain;
+	int rc = 0;
+
+	/* take a snapshot of the FW consumer */
+	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+	/* Need to guarantee that the fw_cons index we use points to a usable
+	 * element (to comply with our chain), so that our macros work correctly
+	 */
+	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+	    qed_chain_get_usable_per_page(p_chain))
+		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+	/* Complete current segment of eq entries */
+	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+		if (!p_eqe) {
+			rc = -EINVAL;
+			break;
+		}
+
+		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+			   p_eqe->opcode,
+			   p_eqe->protocol_id,
+			   p_eqe->reserved0,
+			   le16_to_cpu(p_eqe->echo),
+			   p_eqe->fw_return_code,
+			   p_eqe->flags);
+
+		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+			if (qed_async_event_completion(p_hwfn, p_eqe))
+				rc = -EINVAL;
+		} else if (qed_spq_completion(p_hwfn,
+					      p_eqe->echo,
+					      p_eqe->fw_return_code,
+					      &p_eqe->data)) {
+			rc = -EINVAL;
+		}
+
+		qed_chain_recycle_consumed(p_chain);
+	}
+
+	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+	return rc;
+}
+
+struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
+			    u16 num_elem)
+{
+	struct qed_eq *p_eq;
+
+	/* Allocate EQ struct */
+	p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC);
+	if (!p_eq) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
+		return NULL;
+	}
+
+	/* Allocate and initialize EQ chain */
+	if (qed_chain_alloc(p_hwfn->cdev,
+			    QED_CHAIN_USE_TO_PRODUCE,
+			    QED_CHAIN_MODE_PBL,
+			    num_elem,
+			    sizeof(union event_ring_element),
+			    &p_eq->chain)) {
+		DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
+		goto eq_allocate_fail;
+	}
+
+	/* register EQ completion on the SP SB */
+	qed_int_register_cb(p_hwfn,
+			    qed_eq_completion,
+			    p_eq,
+			    &p_eq->eq_sb_index,
+			    &p_eq->p_fw_cons);
+
+	return p_eq;
+
+eq_allocate_fail:
+	qed_eq_free(p_hwfn, p_eq);
+	return NULL;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn,
+		  struct qed_eq *p_eq)
+{
+	qed_chain_reset(&p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn,
+		 struct qed_eq *p_eq)
+{
+	if (!p_eq)
+		return;
+	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
+	kfree(p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static int qed_cqe_completion(
+	struct qed_hwfn *p_hwfn,
+	struct eth_slow_path_rx_cqe *cqe,
+	enum protocol_type protocol)
+{
+	/* @@@tmp - it's possible we'll eventually want to handle some
+	 * actual commands that can arrive here, but for now this is only
+	 * used to complete the ramrod using the echo value on the cqe
+	 */
+	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+			   struct eth_slow_path_rx_cqe *cqe)
+{
+	int rc;
+
+	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+	if (rc)
+		DP_NOTICE(p_hwfn,
+			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+			  cqe->ramrod_cmd_id);
+
+	return rc;
+}
+
+/***************************************************************************
+* Slow hwfn Queue (spq)
+***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq		*p_spq	= p_hwfn->p_spq;
+	struct qed_spq_entry	*p_virt = NULL;
+	dma_addr_t		p_phys	= 0;
+	unsigned int		i	= 0;
+
+	INIT_LIST_HEAD(&p_spq->pending);
+	INIT_LIST_HEAD(&p_spq->completion_pending);
+	INIT_LIST_HEAD(&p_spq->free_pool);
+	INIT_LIST_HEAD(&p_spq->unlimited_pending);
+	spin_lock_init(&p_spq->lock);
+
+	/* SPQ empty pool */
+	p_phys	= p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+	p_virt	= p_spq->p_virt;
+
+	for (i = 0; i < p_spq->chain.capacity; i++) {
+		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+		list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+		p_virt++;
+		p_phys += sizeof(struct qed_spq_entry);
+	}
+
+	/* Statistics */
+	p_spq->normal_count		= 0;
+	p_spq->comp_count		= 0;
+	p_spq->comp_sent_count		= 0;
+	p_spq->unlimited_pending_count	= 0;
+	p_spq->echo_idx			= 0;
+
+	/* SPQ cid, cannot fail */
+	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+	qed_spq_hw_initialize(p_hwfn, p_spq);
+
+	/* reset the chain itself */
+	qed_chain_reset(&p_spq->chain);
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq		*p_spq	= NULL;
+	dma_addr_t		p_phys	= 0;
+	struct qed_spq_entry	*p_virt = NULL;
+
+	/* SPQ struct */
+	p_spq =
+		kzalloc(sizeof(struct qed_spq), GFP_ATOMIC);
+	if (!p_spq) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
+		return -ENOMEM;
+	}
+
+	/* SPQ ring  */
+	if (qed_chain_alloc(p_hwfn->cdev,
+			    QED_CHAIN_USE_TO_PRODUCE,
+			    QED_CHAIN_MODE_SINGLE,
+			    0,   /* N/A when the mode is SINGLE */
+			    sizeof(struct slow_path_element),
+			    &p_spq->chain)) {
+		DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
+		goto spq_allocate_fail;
+	}
+
+	/* allocate and fill the SPQ elements (incl. ramrod data list) */
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    p_spq->chain.capacity *
+				    sizeof(struct qed_spq_entry),
+				    &p_phys,
+				    GFP_KERNEL);
+
+	if (!p_virt)
+		goto spq_allocate_fail;
+
+	p_spq->p_virt = p_virt;
+	p_spq->p_phys = p_phys;
+	p_hwfn->p_spq = p_spq;
+
+	return 0;
+
+spq_allocate_fail:
+	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+	kfree(p_spq);
+	return -ENOMEM;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	if (!p_spq)
+		return;
+
+	if (p_spq->p_virt)
+		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+				  p_spq->chain.capacity *
+				  sizeof(struct qed_spq_entry),
+				  p_spq->p_virt,
+				  p_spq->p_phys);
+
+	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+	kfree(p_spq);
+}
+
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+		  struct qed_spq_entry **pp_ent)
+{
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+	struct qed_spq_entry *p_ent = NULL;
+	int rc = 0;
+
+	spin_lock_bh(&p_spq->lock);
+
+	if (list_empty(&p_spq->free_pool)) {
+		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+		if (!p_ent) {
+			rc = -ENOMEM;
+			goto out_unlock;
+		}
+		p_ent->queue = &p_spq->unlimited_pending;
+	} else {
+		p_ent = list_first_entry(&p_spq->free_pool,
+					 struct qed_spq_entry,
+					 list);
+		list_del(&p_ent->list);
+		p_ent->queue = &p_spq->pending;
+	}
+
+	*pp_ent = p_ent;
+
+out_unlock:
+	spin_unlock_bh(&p_spq->lock);
+	return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+				   struct qed_spq_entry *p_ent)
+{
+	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+			  struct qed_spq_entry *p_ent)
+{
+	spin_lock_bh(&p_hwfn->p_spq->lock);
+	__qed_spq_return_entry(p_hwfn, p_ent);
+	spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief qed_spq_add_entry - adds a new entry to the pending
+ *        list. Should be called while the SPQ lock is held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return int
+ */
+static int
+qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+		  struct qed_spq_entry *p_ent,
+		  enum spq_priority priority)
+{
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	if (p_ent->queue == &p_spq->unlimited_pending) {
+		struct qed_spq_entry *p_en2;
+
+		if (list_empty(&p_spq->free_pool)) {
+			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+			p_spq->unlimited_pending_count++;
+
+			return 0;
+		}
+
+		p_en2 = list_first_entry(&p_spq->free_pool,
+					 struct qed_spq_entry,
+					 list);
+		list_del(&p_en2->list);
+
+		/* Struct assignment */
+		*p_en2 = *p_ent;
+
+		kfree(p_ent);
+
+		p_ent = p_en2;
+	}
+
+	/* entry is to be placed in 'pending' queue */
+	switch (priority) {
+	case QED_SPQ_PRIORITY_NORMAL:
+		list_add_tail(&p_ent->list, &p_spq->pending);
+		p_spq->normal_count++;
+		break;
+	case QED_SPQ_PRIORITY_HIGH:
+		list_add(&p_ent->list, &p_spq->pending);
+		p_spq->high_count++;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/***************************************************************************
+* Accessor
+***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+	if (!p_hwfn->p_spq)
+		return 0xffffffff;      /* illegal */
+	return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+* Posting new Ramrods
+***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+			     struct list_head *head,
+			     u32 keep_reserve)
+{
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+	int rc;
+
+	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+	       !list_empty(head)) {
+		struct qed_spq_entry *p_ent =
+			list_first_entry(head, struct qed_spq_entry, list);
+		list_del(&p_ent->list);
+		list_add_tail(&p_ent->list, &p_spq->completion_pending);
+		p_spq->comp_sent_count++;
+
+		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+		if (rc) {
+			list_del(&p_ent->list);
+			__qed_spq_return_entry(p_hwfn, p_ent);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+	struct qed_spq_entry *p_ent = NULL;
+
+	while (!list_empty(&p_spq->free_pool)) {
+		if (list_empty(&p_spq->unlimited_pending))
+			break;
+
+		p_ent = list_first_entry(&p_spq->unlimited_pending,
+					 struct qed_spq_entry,
+					 list);
+		if (!p_ent)
+			return -EINVAL;
+
+		list_del(&p_ent->list);
+
+		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+	}
+
+	return qed_spq_post_list(p_hwfn, &p_spq->pending,
+				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+		 struct qed_spq_entry *p_ent,
+		 u8 *fw_return_code)
+{
+	int rc = 0;
+	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+	bool b_ret_ent = true;
+
+	if (!p_hwfn)
+		return -EINVAL;
+
+	if (!p_ent) {
+		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+		return -EINVAL;
+	}
+
+	/* Complete the entry */
+	rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+	spin_lock_bh(&p_spq->lock);
+
+	/* Check return value after LOCK is taken for cleaner error flow */
+	if (rc)
+		goto spq_post_fail;
+
+	/* Add the request to the pending queue */
+	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+	if (rc)
+		goto spq_post_fail;
+
+	rc = qed_spq_pend_post(p_hwfn);
+	if (rc) {
+		/* Since it's possible that pending failed for a different
+		 * entry [although unlikely], the failed entry was already
+		 * dealt with; No need to return it here.
+		 */
+		b_ret_ent = false;
+		goto spq_post_fail;
+	}
+
+	spin_unlock_bh(&p_spq->lock);
+
+	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
+		/* For entries in QED BLOCK mode, the completion code cannot
+		 * perform the necessary cleanup - if it did, we couldn't
+		 * access p_ent here to see whether it's successful or not.
+		 * Thus, after gaining the answer perform the cleanup here.
+		 */
+		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+		if (rc)
+			goto spq_post_fail2;
+
+		/* return to pool */
+		qed_spq_return_entry(p_hwfn, p_ent);
+	}
+	return rc;
+
+spq_post_fail2:
+	spin_lock_bh(&p_spq->lock);
+	list_del(&p_ent->list);
+	qed_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+	/* return to the free pool */
+	if (b_ret_ent)
+		__qed_spq_return_entry(p_hwfn, p_ent);
+	spin_unlock_bh(&p_spq->lock);
+
+	return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+		       __le16 echo,
+		       u8 fw_return_code,
+		       union event_ring_data *p_data)
+{
+	struct qed_spq		*p_spq;
+	struct qed_spq_entry	*p_ent = NULL;
+	struct qed_spq_entry	*tmp;
+	struct qed_spq_entry	*found = NULL;
+	int			rc;
+
+	if (!p_hwfn)
+		return -EINVAL;
+
+	p_spq = p_hwfn->p_spq;
+	if (!p_spq)
+		return -EINVAL;
+
+	spin_lock_bh(&p_spq->lock);
+	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
+				 list) {
+		if (p_ent->elem.hdr.echo == echo) {
+			list_del(&p_ent->list);
+
+			qed_chain_return_produced(&p_spq->chain);
+			p_spq->comp_count++;
+			found = p_ent;
+			break;
+		}
+	}
+
+	/* Release lock before callback, as callback may post
+	 * an additional ramrod.
+	 */
+	spin_unlock_bh(&p_spq->lock);
+
+	if (!found) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to find an entry this EQE completes\n");
+		return -EEXIST;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p)\n",
+		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+	if (found->comp_cb.function)
+		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+					fw_return_code);
+
+	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+		/* EBLOCK is responsible for freeing its own entry */
+		qed_spq_return_entry(p_hwfn, found);
+
+	/* Attempt to post pending requests */
+	spin_lock_bh(&p_spq->lock);
+	rc = qed_spq_pend_post(p_hwfn);
+	spin_unlock_bh(&p_spq->lock);
+
+	return rc;
+}
+
+struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+	struct qed_consq *p_consq;
+
+	/* Allocate ConsQ struct */
+	p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC);
+	if (!p_consq) {
+		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
+		return NULL;
+	}
+
+	/* Allocate and initialize ConsQ chain */
+	if (qed_chain_alloc(p_hwfn->cdev,
+			    QED_CHAIN_USE_TO_PRODUCE,
+			    QED_CHAIN_MODE_PBL,
+			    QED_CHAIN_PAGE_SIZE / 0x80,
+			    0x80,
+			    &p_consq->chain)) {
+		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
+		goto consq_allocate_fail;
+	}
+
+	return p_consq;
+
+consq_allocate_fail:
+	qed_consq_free(p_hwfn, p_consq);
+	return NULL;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn,
+		     struct qed_consq *p_consq)
+{
+	qed_chain_reset(&p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn,
+		    struct qed_consq *p_consq)
+{
+	if (!p_consq)
+		return;
+	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
+	kfree(p_consq);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 0000000..06ff90d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 0000000..ea00d5f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,285 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#define QEDE_MAJOR_VERSION		8
+#define QEDE_MINOR_VERSION		4
+#define QEDE_REVISION_VERSION		0
+#define QEDE_ENGINEERING_VERSION	0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
+		__stringify(QEDE_MINOR_VERSION) "."		\
+		__stringify(QEDE_REVISION_VERSION) "."		\
+		__stringify(QEDE_ENGINEERING_VERSION)
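+
+/* With the values above, DRV_MODULE_VERSION expands to "8.4.0.0". */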
+
+#define QEDE_ETH_INTERFACE_VERSION	300
+
+#define DRV_MODULE_SYM		qede
+
+struct qede_stats {
+	u64 no_buff_discards;
+	u64 rx_ucast_bytes;
+	u64 rx_mcast_bytes;
+	u64 rx_bcast_bytes;
+	u64 rx_ucast_pkts;
+	u64 rx_mcast_pkts;
+	u64 rx_bcast_pkts;
+	u64 mftag_filter_discards;
+	u64 mac_filter_discards;
+	u64 tx_ucast_bytes;
+	u64 tx_mcast_bytes;
+	u64 tx_bcast_bytes;
+	u64 tx_ucast_pkts;
+	u64 tx_mcast_pkts;
+	u64 tx_bcast_pkts;
+	u64 tx_err_drop_pkts;
+	u64 coalesced_pkts;
+	u64 coalesced_events;
+	u64 coalesced_aborts_num;
+	u64 non_coalesced_pkts;
+	u64 coalesced_bytes;
+
+	/* port */
+	u64 rx_64_byte_packets;
+	u64 rx_127_byte_packets;
+	u64 rx_255_byte_packets;
+	u64 rx_511_byte_packets;
+	u64 rx_1023_byte_packets;
+	u64 rx_1518_byte_packets;
+	u64 rx_1522_byte_packets;
+	u64 rx_2047_byte_packets;
+	u64 rx_4095_byte_packets;
+	u64 rx_9216_byte_packets;
+	u64 rx_16383_byte_packets;
+	u64 rx_crc_errors;
+	u64 rx_mac_crtl_frames;
+	u64 rx_pause_frames;
+	u64 rx_pfc_frames;
+	u64 rx_align_errors;
+	u64 rx_carrier_errors;
+	u64 rx_oversize_packets;
+	u64 rx_jabbers;
+	u64 rx_undersize_packets;
+	u64 rx_fragments;
+	u64 tx_64_byte_packets;
+	u64 tx_65_to_127_byte_packets;
+	u64 tx_128_to_255_byte_packets;
+	u64 tx_256_to_511_byte_packets;
+	u64 tx_512_to_1023_byte_packets;
+	u64 tx_1024_to_1518_byte_packets;
+	u64 tx_1519_to_2047_byte_packets;
+	u64 tx_2048_to_4095_byte_packets;
+	u64 tx_4096_to_9216_byte_packets;
+	u64 tx_9217_to_16383_byte_packets;
+	u64 tx_pause_frames;
+	u64 tx_pfc_frames;
+	u64 tx_lpi_entry_count;
+	u64 tx_total_collisions;
+	u64 brb_truncates;
+	u64 brb_discards;
+	u64 tx_mac_ctrl_frames;
+};
+
+struct qede_dev {
+	struct qed_dev			*cdev;
+	struct net_device		*ndev;
+	struct pci_dev			*pdev;
+
+	u32				dp_module;
+	u8				dp_level;
+
+	const struct qed_eth_ops	*ops;
+
+	struct qed_dev_eth_info	dev_info;
+#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues * \
+				 (edev)->dev_info.num_tc)
+
+	struct qede_fastpath		*fp_array;
+	u16				num_rss;
+	u8				num_tc;
+#define QEDE_RSS_CNT(edev)		((edev)->num_rss)
+#define QEDE_TSS_CNT(edev)		((edev)->num_rss *	\
+					 (edev)->num_tc)
+#define QEDE_TSS_IDX(edev, txqidx)	((txqidx) % (edev)->num_rss)
+#define QEDE_TC_IDX(edev, txqidx)	((txqidx) / (edev)->num_rss)
+#define QEDE_TX_QUEUE(edev, txqidx)	\
+	(&(edev)->fp_array[QEDE_TSS_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX( \
+							(edev), (txqidx))])
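+/* The stack hands the driver a flat Tx queue index; the macros above map
+ * it back to its (rss queue, traffic class) pair. E.g. with num_rss = 8,
+ * txqidx 10 resolves to traffic class 10 / 8 = 1 on rss queue 10 % 8 = 2.
+ */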
+
+	struct qed_int_info		int_info;
+	unsigned char			primary_mac[ETH_ALEN];
+
+	/* Smaller private variant of the RTNL lock */
+	struct mutex			qede_lock;
+	u32				state; /* Protected by qede_lock */
+	u16				rx_buf_size;
+	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD			(ETH_HLEN + 8 + 8)
+	/* Max supported alignment is 256 (8 shift); a minimal alignment
+	 * shift of 6 is optimal for 57xxx HW performance.
+	 */
+#define QEDE_RX_ALIGN_SHIFT		max(6, min(8, L1_CACHE_SHIFT))
+	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+	 * at the end of skb->data, to avoid wasting a full cache line.
+	 * This reduces memory use (skb->truesize).
+	 */
+#define QEDE_FW_RX_ALIGN_END					\
+	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
+	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+	struct qede_stats		stats;
+	struct qed_update_vport_rss_params	rss_params;
+	u16			q_num_rx_buffers; /* Must be a power of two */
+	u16			q_num_tx_buffers; /* Must be a power of two */
+
+	struct delayed_work		sp_task;
+	unsigned long			sp_flags;
+};
+
+enum QEDE_STATE {
+	QEDE_STATE_CLOSED,
+	QEDE_STATE_OPEN,
+};
+
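+/* Reassemble a 64-bit quantity (e.g. a DMA address) from the hi/lo 32-bit
+ * halves used throughout the FW rings, e.g.
+ * HILO_U64(0x00000001, 0x23456789) == 0x0000000123456789ULL.
+ */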
+#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
+
+#define	MAX_NUM_TC	8
+#define	MAX_NUM_PRI	8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skbs are built only after the frame has been DMA-ed.
+ */
+struct sw_rx_data {
+	u8 *data;
+
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct qede_rx_queue {
+	__le16			*hw_cons_ptr;
+	struct sw_rx_data	*sw_rx_ring;
+	u16			sw_rx_cons;
+	u16			sw_rx_prod;
+	struct qed_chain	rx_bd_ring;
+	struct qed_chain	rx_comp_ring;
+	void __iomem		*hw_rxq_prod_addr;
+
+	int			rx_buf_size;
+
+	u16			num_rx_buffers;
+	u16			rxq_id;
+
+	u64			rx_hw_errors;
+	u64			rx_alloc_errors;
+};
+
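+/* Overlaying the doorbell data with a raw u32 allows the prepared producer
+ * value to be posted to the doorbell BAR in a single 32-bit write; see
+ * writel(txq->tx_db.raw, txq->doorbell_addr) in the Tx path.
+ */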
+union db_prod {
+	struct eth_db_data data;
+	u32		raw;
+};
+
+struct sw_tx_bd {
+	struct sk_buff *skb;
+	u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD		BIT(0)
+};
+
+struct qede_tx_queue {
+	int			index; /* Queue index */
+	__le16			*hw_cons_ptr;
+	struct sw_tx_bd		*sw_tx_ring;
+	u16			sw_tx_cons;
+	u16			sw_tx_prod;
+	struct qed_chain	tx_pbl;
+	void __iomem		*doorbell_addr;
+	union db_prod		tx_db;
+
+	u16			num_tx_buffers;
+};
+
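+/* BDs store DMA addresses as little-endian hi/lo pairs; these helpers
+ * convert between that layout and the CPU-order address/length needed by
+ * dma_unmap_page() in the Tx unmap paths.
+ */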
+#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr.hi), \
+						 le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
+	do {								\
+		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
+		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
+		(bd)->nbytes = cpu_to_le16(len);			\
+	} while (0)
+#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
+
+struct qede_fastpath {
+	struct qede_dev	*edev;
+	u8			rss_id;
+	struct napi_struct	napi;
+	struct qed_sb_info	*sb_info;
+	struct qede_rx_queue	*rxq;
+	struct qede_tx_queue	*txqs;
+
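+/* sizeof(((struct net_device *)0)->name) yields the size of the name
+ * member without needing an instance; the extra 8 bytes presumably leave
+ * room for a per-vector suffix in the interrupt name.
+ */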
+#define VEC_NAME_SIZE	(sizeof(((struct net_device *)0)->name) + 8)
+	char	name[VEC_NAME_SIZE];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) ((edev)->ndev->name)
+
+#define XMIT_PLAIN		0
+#define XMIT_L4_CSUM		BIT(0)
+#define XMIT_LSO		BIT(1)
+#define XMIT_ENC		BIT(2)
+
+#define QEDE_CSUM_ERROR			BIT(0)
+#define QEDE_CSUM_UNNECESSARY		BIT(1)
+
+#define QEDE_SP_RX_MODE		1
+
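+/* Arguments passed through qede_reload(); the supplied callback applies
+ * them (e.g. the new MTU in qede_change_mtu()) as part of the reload.
+ */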
+union qede_reload_args {
+	u16 mtu;
+};
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_reload(struct qede_dev *edev,
+		 void (*func)(struct qede_dev *edev,
+			      union qede_reload_args *args),
+		 union qede_reload_args *args);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+
+#define RX_RING_SIZE_POW	13
+#define RX_RING_SIZE		BIT(RX_RING_SIZE_POW)
+#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN		128
+#define NUM_RX_BDS_DEF		NUM_RX_BDS_MAX
+
+#define TX_RING_SIZE_POW	13
+#define TX_RING_SIZE		BIT(TX_RING_SIZE_POW)
+#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN		128
+#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
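+/* Ring sizes are powers of two, so SIZE - 1 doubles as an index mask,
+ * e.g. idx = txq->sw_tx_prod & NUM_TX_BDS_MAX wraps the software producer
+ * into the ring without a modulo.
+ */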
+
+#define	for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 0000000..3a36247
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,385 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include "qede.h"
+
+#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, pf_only) \
+	 {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
+#define QEDE_PF_STAT(stat_name)		_QEDE_STAT(stat_name, true)
+#define QEDE_STAT(stat_name)		_QEDE_STAT(stat_name, false)
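+/* Example expansion, for illustration:
+ * QEDE_STAT(rx_ucast_bytes) ->
+ * { offsetof(struct qede_stats, rx_ucast_bytes), "rx_ucast_bytes", false }
+ */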
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+	 (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+	 {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+static const struct {
+	u64 offset;
+	char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+	QEDE_RQSTAT(rx_hw_errors),
+	QEDE_RQSTAT(rx_alloc_errors),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_RQSTATS_DATA(dev, sindex, rqindex) \
+	(*((u64 *)(((char *)(dev->fp_array[(rqindex)].rxq)) +\
+		    qede_rqstats_arr[(sindex)].offset)))
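+/* Per-queue stats are read generically: the stored offset is added to the
+ * rxq base pointer and dereferenced as a u64, so a new counter only needs
+ * a QEDE_RQSTAT() entry in the array above.
+ */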
+static const struct {
+	u64 offset;
+	char string[ETH_GSTRING_LEN];
+	bool pf_only;
+} qede_stats_arr[] = {
+	QEDE_STAT(rx_ucast_bytes),
+	QEDE_STAT(rx_mcast_bytes),
+	QEDE_STAT(rx_bcast_bytes),
+	QEDE_STAT(rx_ucast_pkts),
+	QEDE_STAT(rx_mcast_pkts),
+	QEDE_STAT(rx_bcast_pkts),
+
+	QEDE_STAT(tx_ucast_bytes),
+	QEDE_STAT(tx_mcast_bytes),
+	QEDE_STAT(tx_bcast_bytes),
+	QEDE_STAT(tx_ucast_pkts),
+	QEDE_STAT(tx_mcast_pkts),
+	QEDE_STAT(tx_bcast_pkts),
+
+	QEDE_PF_STAT(rx_64_byte_packets),
+	QEDE_PF_STAT(rx_127_byte_packets),
+	QEDE_PF_STAT(rx_255_byte_packets),
+	QEDE_PF_STAT(rx_511_byte_packets),
+	QEDE_PF_STAT(rx_1023_byte_packets),
+	QEDE_PF_STAT(rx_1518_byte_packets),
+	QEDE_PF_STAT(rx_1522_byte_packets),
+	QEDE_PF_STAT(rx_2047_byte_packets),
+	QEDE_PF_STAT(rx_4095_byte_packets),
+	QEDE_PF_STAT(rx_9216_byte_packets),
+	QEDE_PF_STAT(rx_16383_byte_packets),
+	QEDE_PF_STAT(tx_64_byte_packets),
+	QEDE_PF_STAT(tx_65_to_127_byte_packets),
+	QEDE_PF_STAT(tx_128_to_255_byte_packets),
+	QEDE_PF_STAT(tx_256_to_511_byte_packets),
+	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+	QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
+	QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
+	QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
+	QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
+
+	QEDE_PF_STAT(rx_mac_crtl_frames),
+	QEDE_PF_STAT(tx_mac_ctrl_frames),
+	QEDE_PF_STAT(rx_pause_frames),
+	QEDE_PF_STAT(tx_pause_frames),
+	QEDE_PF_STAT(rx_pfc_frames),
+	QEDE_PF_STAT(tx_pfc_frames),
+
+	QEDE_PF_STAT(rx_crc_errors),
+	QEDE_PF_STAT(rx_align_errors),
+	QEDE_PF_STAT(rx_carrier_errors),
+	QEDE_PF_STAT(rx_oversize_packets),
+	QEDE_PF_STAT(rx_jabbers),
+	QEDE_PF_STAT(rx_undersize_packets),
+	QEDE_PF_STAT(rx_fragments),
+	QEDE_PF_STAT(tx_lpi_entry_count),
+	QEDE_PF_STAT(tx_total_collisions),
+	QEDE_PF_STAT(brb_truncates),
+	QEDE_PF_STAT(brb_discards),
+	QEDE_STAT(no_buff_discards),
+	QEDE_PF_STAT(mftag_filter_discards),
+	QEDE_PF_STAT(mac_filter_discards),
+	QEDE_STAT(tx_err_drop_pkts),
+
+	QEDE_STAT(coalesced_pkts),
+	QEDE_STAT(coalesced_events),
+	QEDE_STAT(coalesced_aborts_num),
+	QEDE_STAT(non_coalesced_pkts),
+	QEDE_STAT(coalesced_bytes),
+};
+
+#define QEDE_STATS_DATA(dev, index) \
+	(*((u64 *)(((char *)(dev)) + offsetof(struct qede_dev, stats) \
+			+ qede_stats_arr[(index)].offset)))
+
+#define QEDE_NUM_STATS	ARRAY_SIZE(qede_stats_arr)
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+	int i, j, k;
+
+	for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+		strcpy(buf + j * ETH_GSTRING_LEN,
+		       qede_stats_arr[i].string);
+		j++;
+	}
+
+	for (k = 0; k < QEDE_NUM_RQSTATS; k++, j++)
+		strcpy(buf + j * ETH_GSTRING_LEN,
+		       qede_rqstats_arr[k].string);
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		qede_get_strings_stats(edev, buf);
+		break;
+	default:
+		DP_VERBOSE(edev, QED_MSG_DEBUG,
+			   "Unsupported stringset 0x%08x\n", stringset);
+	}
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *buf)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int sidx, cnt = 0;
+	int qid;
+
+	qede_fill_by_demand_stats(edev);
+
+	mutex_lock(&edev->qede_lock);
+
+	for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+		buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+
+	for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
+		buf[cnt] = 0;
+		for (qid = 0; qid < edev->num_rss; qid++)
+			buf[cnt] += QEDE_RQSTATS_DATA(edev, sidx, qid);
+		cnt++;
+	}
+
+	mutex_unlock(&edev->qede_lock);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int num_stats = QEDE_NUM_STATS;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		return num_stats + QEDE_NUM_RQSTATS;
+
+	default:
+		DP_VERBOSE(edev, QED_MSG_DEBUG,
+			   "Unsupported stringset 0x%08x\n", stringset);
+		return -EINVAL;
+	}
+}
+
+static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+	cmd->supported = current_link.supported_caps;
+	cmd->advertising = current_link.advertised_caps;
+	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+		ethtool_cmd_speed_set(cmd, current_link.speed);
+		cmd->duplex = current_link.duplex;
+	} else {
+		cmd->duplex = DUPLEX_UNKNOWN;
+		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+	}
+	cmd->port = current_link.port;
+	cmd->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+						AUTONEG_DISABLE;
+	cmd->lp_advertising = current_link.lp_caps;
+
+	return 0;
+}
+
+static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	struct qed_link_output current_link;
+	struct qed_link_params params;
+	u32 speed;
+
+	if (edev->dev_info.common.is_mf) {
+		DP_INFO(edev,
+			"Link parameters can not be changed in MF mode\n");
+		return -EOPNOTSUPP;
+	}
+
+	memset(&current_link, 0, sizeof(current_link));
+	memset(&params, 0, sizeof(params));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+	speed = ethtool_cmd_speed(cmd);
+	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		params.autoneg = true;
+		params.forced_speed = 0;
+		params.adv_speeds = cmd->advertising;
+	} else { /* forced speed */
+		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+		params.autoneg = false;
+		params.forced_speed = speed;
+		switch (speed) {
+		case SPEED_10000:
+			if (!(current_link.supported_caps &
+			    SUPPORTED_10000baseKR_Full)) {
+				DP_INFO(edev, "10G speed not supported\n");
+				return -EINVAL;
+			}
+			params.adv_speeds = SUPPORTED_10000baseKR_Full;
+			break;
+		case SPEED_40000:
+			if (!(current_link.supported_caps &
+			    SUPPORTED_40000baseLR4_Full)) {
+				DP_INFO(edev, "40G speed not supported\n");
+				return -EINVAL;
+			}
+			params.adv_speeds = SUPPORTED_40000baseLR4_Full;
+			break;
+		default:
+			DP_INFO(edev, "Unsupported speed %u\n", speed);
+			return -EINVAL;
+		}
+	}
+
+	params.link_up = true;
+	edev->ops->common->set_link(edev->cdev, &params);
+
+	return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+			     struct ethtool_drvinfo *info)
+{
+	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	strlcpy(info->driver, "qede", sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+
+	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+		 edev->dev_info.common.fw_major,
+		 edev->dev_info.common.fw_minor,
+		 edev->dev_info.common.fw_rev,
+		 edev->dev_info.common.fw_eng);
+
+	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+		 edev->dev_info.common.mfw_rev & 0xFF);
+
+	if ((strlen(storm) + strlen(mfw) + strlen("mfw storm  ")) <
+	    sizeof(info->fw_version)) {
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			 "mfw %s storm %s", mfw, storm);
+	} else {
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			 "%s %s", mfw, storm);
+	}
+
+	strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) |
+	       edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	u32 dp_module = 0;
+	u8 dp_level = 0;
+
+	qede_config_debug(level, &dp_module, &dp_level);
+
+	edev->dp_level = dp_level;
+	edev->dp_module = dp_module;
+	edev->ops->common->update_msglvl(edev->cdev,
+					 dp_module, dp_level);
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	struct qed_link_output current_link;
+
+	memset(&current_link, 0, sizeof(current_link));
+	edev->ops->common->get_link(edev->cdev, &current_link);
+
+	return current_link.link_up;
+}
+
+static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
+{
+	edev->ndev->mtu = args->mtu;
+}
+
+/* Netdevice NDOs */
+#define ETH_MAX_JUMBO_PACKET_SIZE	9600
+#define ETH_MIN_PACKET_SIZE		60
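+/* Valid MTU range: the resulting frame (MTU + L2 header) must reach the
+ * 60-byte Ethernet minimum, i.e. MTU >= 60 - ETH_HLEN = 46, and must not
+ * exceed the 9600-byte jumbo limit.
+ */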
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	union qede_reload_args args;
+
+	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+		DP_ERR(edev, "Can't support requested MTU size\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+		   "Configuring MTU size of %d\n", new_mtu);
+
+	/* Set the MTU field and restart the interface if needed */
+	args.mtu = new_mtu;
+
+	if (netif_running(edev->ndev))
+		qede_reload(edev, &qede_update_mtu, &args);
+
+	qede_update_mtu(edev, &args);
+
+	return 0;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+	.get_settings = qede_get_settings,
+	.set_settings = qede_set_settings,
+	.get_drvinfo = qede_get_drvinfo,
+	.get_msglevel = qede_get_msglevel,
+	.set_msglevel = qede_set_msglevel,
+	.get_link = qede_get_link,
+	.get_strings = qede_get_strings,
+	.get_ethtool_stats = qede_get_ethtool_stats,
+	.get_sset_count = qede_get_sset_count,
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+	dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 0000000..f4657a2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2584 @@
+/* QLogic qede NIC Driver
+* Copyright (c) 2015 QLogic Corporation
+*
+* This software is available under the terms of the GNU General Public License
+* (GPL) Version 2, available from the file COPYING in the main directory of
+* this source tree.
+*/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/vxlan.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+
+#include "qede.h"
+
+static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
+			      DRV_MODULE_VERSION "\n";
+
+MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40		0x1634
+#define CHIP_NUM_57980S_10		0x1635
+#define CHIP_NUM_57980S_MF		0x1636
+#define CHIP_NUM_57980S_100		0x1644
+#define CHIP_NUM_57980S_50		0x1654
+#define CHIP_NUM_57980S_25		0x1656
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
+#endif
+
+static const struct pci_device_id qede_pci_tbl[] = {
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
+	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+
+#define TX_TIMEOUT		(5 * HZ)
+
+static void qede_remove(struct pci_dev *pdev);
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+				struct qede_rx_queue *rxq);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+
+static struct pci_driver qede_pci_driver = {
+	.name = "qede",
+	.id_table = qede_pci_tbl,
+	.probe = qede_probe,
+	.remove = qede_remove,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+	{
+		.link_update = qede_link_update,
+	},
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+			     void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct ethtool_drvinfo drvinfo;
+	struct qede_dev *edev;
+
+	/* Currently only support name change */
+	if (event != NETDEV_CHANGENAME)
+		goto done;
+
+	/* Check whether this is a qede device */
+	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+		goto done;
+
+	memset(&drvinfo, 0, sizeof(drvinfo));
+	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+	if (strcmp(drvinfo.driver, "qede"))
+		goto done;
+	edev = netdev_priv(ndev);
+
+	/* Notify qed of the name change */
+	if (!edev->ops || !edev->ops->common)
+		goto done;
+	edev->ops->common->set_id(edev->cdev, edev->ndev->name,
+				  "qede");
+
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+	.notifier_call = qede_netdev_event,
+};
+
+static int __init qede_init(void)
+{
+	int ret;
+	u32 qed_ver;
+
+	pr_notice("qede_init: %s\n", version);
+
+	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
+	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
+		pr_notice("Version mismatch [%08x != %08x]\n",
+			  qed_ver,
+			  QEDE_ETH_INTERFACE_VERSION);
+		return -EINVAL;
+	}
+
+	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+	if (!qed_ops) {
+		pr_notice("Failed to get qed ethtool operations\n");
+		return -EINVAL;
+	}
+
+	/* Must register notifier before pci ops, since we might miss
+	 * interface rename after pci probe and netdev registration.
+	 */
+	ret = register_netdevice_notifier(&qede_netdev_notifier);
+	if (ret) {
+		pr_notice("Failed to register netdevice_notifier\n");
+		qed_put_eth_ops();
+		return -EINVAL;
+	}
+
+	ret = pci_register_driver(&qede_pci_driver);
+	if (ret) {
+		pr_notice("Failed to register driver\n");
+		unregister_netdevice_notifier(&qede_netdev_notifier);
+		qed_put_eth_ops();
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+	pr_notice("qede_cleanup called\n");
+
+	unregister_netdevice_notifier(&qede_netdev_notifier);
+	pci_unregister_driver(&qede_pci_driver);
+	qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+/* -------------------------------------------------------------------------
+ * START OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+/* Unmap the data and free skb */
+static int qede_free_tx_pkt(struct qede_dev *edev,
+			    struct qede_tx_queue *txq,
+			    int *len)
+{
+	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+	struct eth_tx_1st_bd *first_bd;
+	struct eth_tx_bd *tx_data_bd;
+	int bds_consumed = 0;
+	int nbds;
+	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+	int i, split_bd_len = 0;
+
+	if (unlikely(!skb)) {
+		DP_ERR(edev,
+		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
+		return -1;
+	}
+
+	*len = skb->len;
+
+	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+	bds_consumed++;
+
+	nbds = first_bd->data.nbds;
+
+	if (data_split) {
+		struct eth_tx_bd *split = (struct eth_tx_bd *)
+			qed_chain_consume(&txq->tx_pbl);
+		split_bd_len = BD_UNMAP_LEN(split);
+		bds_consumed++;
+	}
+	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+	/* Unmap the data of the skb frags */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+		tx_data_bd = (struct eth_tx_bd *)
+			qed_chain_consume(&txq->tx_pbl);
+		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+	}
+
+	while (bds_consumed++ < nbds)
+		qed_chain_consume(&txq->tx_pbl);
+
+	/* Free skb */
+	dev_kfree_skb_any(skb);
+	txq->sw_tx_ring[idx].skb = NULL;
+	txq->sw_tx_ring[idx].flags = 0;
+
+	return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_dev *edev,
+				    struct qede_tx_queue *txq,
+				    struct eth_tx_1st_bd *first_bd,
+				    int nbd,
+				    bool data_split)
+{
+	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+	struct eth_tx_bd *tx_data_bd;
+	int i, split_bd_len = 0;
+
+	/* Return prod to its position before this skb was handled */
+	qed_chain_set_prod(&txq->tx_pbl,
+			   le16_to_cpu(txq->tx_db.data.bd_prod),
+			   first_bd);
+
+	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+	if (data_split) {
+		struct eth_tx_bd *split = (struct eth_tx_bd *)
+					  qed_chain_produce(&txq->tx_pbl);
+		split_bd_len = BD_UNMAP_LEN(split);
+		nbd--;
+	}
+
+	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+	/* Unmap the data of the skb frags */
+	for (i = 0; i < nbd; i++) {
+		tx_data_bd = (struct eth_tx_bd *)
+			qed_chain_produce(&txq->tx_pbl);
+		if (tx_data_bd->nbytes)
+			dma_unmap_page(&edev->pdev->dev,
+				       BD_UNMAP_ADDR(tx_data_bd),
+				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+	}
+
+	/* Return again prod to its position before this skb was handled */
+	qed_chain_set_prod(&txq->tx_pbl,
+			   le16_to_cpu(txq->tx_db.data.bd_prod),
+			   first_bd);
+
+	/* Free skb */
+	dev_kfree_skb_any(skb);
+	txq->sw_tx_ring[idx].skb = NULL;
+	txq->sw_tx_ring[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct qede_dev *edev,
+			  struct sk_buff *skb,
+			  int *ipv6_ext)
+{
+	u32 rc = XMIT_L4_CSUM;
+	__be16 l3_proto;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return XMIT_PLAIN;
+
+	l3_proto = vlan_get_protocol(skb);
+	if (l3_proto == htons(ETH_P_IPV6) &&
+	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+		*ipv6_ext = 1;
+
+	if (skb_is_gso(skb))
+		rc |= XMIT_LSO;
+
+	return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+					 struct eth_tx_2nd_bd *second_bd,
+					 struct eth_tx_3rd_bd *third_bd)
+{
+	u8 l4_proto;
+	u16 bd2_bits = 0, bd2_bits2 = 0;
+
+	bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+	bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+	bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+		l4_proto = ipv6_hdr(skb)->nexthdr;
+	else
+		l4_proto = ip_hdr(skb)->protocol;
+
+	if (l4_proto == IPPROTO_UDP)
+		bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+	if (third_bd) {
+		third_bd->data.bitfields |=
+			((tcp_hdrlen(skb) / 4) &
+			 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+			ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
+	}
+
+	second_bd->data.bitfields = cpu_to_le16(bd2_bits);
+	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_dev *edev,
+			  skb_frag_t *frag,
+			  struct eth_tx_bd *bd)
+{
+	dma_addr_t mapping;
+
+	/* Map skb non-linear frag data for DMA */
+	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+				   skb_frag_size(frag),
+				   DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+		return -ENOMEM;
+	}
+
+	/* Setup the data pointer of the frag data */
+	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+	return 0;
+}
+
+/* Main transmit function */
+static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
+				   struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	struct netdev_queue *netdev_txq;
+	struct qede_tx_queue *txq;
+	struct eth_tx_1st_bd *first_bd;
+	struct eth_tx_2nd_bd *second_bd = NULL;
+	struct eth_tx_3rd_bd *third_bd = NULL;
+	struct eth_tx_bd *tx_data_bd = NULL;
+	u16 txq_index;
+	u8 nbd = 0;
+	dma_addr_t mapping;
+	int rc, frag_idx = 0, ipv6_ext = 0;
+	u8 xmit_type;
+	u16 idx;
+	u16 hlen;
+	bool data_split = false;
+
+	/* Get tx-queue context and netdev index */
+	txq_index = skb_get_queue_mapping(skb);
+	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
+	txq = QEDE_TX_QUEUE(edev, txq_index);
+	netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+	/* Current code doesn't support SKB linearization, since the max
+	 * number of skb frags always fits within the FW HSI limit (checked
+	 * by the BUILD_BUG_ON below).
+	 */
+	BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);
+
+	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
+			       (MAX_SKB_FRAGS + 1));
+
+	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+
+	/* Fill the entry in the SW ring and the BDs in the FW ring */
+	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+	txq->sw_tx_ring[idx].skb = skb;
+	first_bd = (struct eth_tx_1st_bd *)
+		   qed_chain_produce(&txq->tx_pbl);
+	memset(first_bd, 0, sizeof(*first_bd));
+	first_bd->data.bd_flags.bitfields =
+		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+	/* Map skb linear data for DMA and set in the first BD */
+	mapping = dma_map_single(&edev->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+		DP_NOTICE(edev, "SKB mapping failed\n");
+		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+		return NETDEV_TX_OK;
+	}
+	nbd++;
+	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+	/* In case there is IPv6 with extension headers or LSO we need 2nd and
+	 * 3rd BDs.
+	 */
+	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+		second_bd = (struct eth_tx_2nd_bd *)
+			qed_chain_produce(&txq->tx_pbl);
+		memset(second_bd, 0, sizeof(*second_bd));
+
+		nbd++;
+		third_bd = (struct eth_tx_3rd_bd *)
+			qed_chain_produce(&txq->tx_pbl);
+		memset(third_bd, 0, sizeof(*third_bd));
+
+		nbd++;
+		/* We need to fill in additional data in second_bd... */
+		tx_data_bd = (struct eth_tx_bd *)second_bd;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+		first_bd->data.bd_flags.bitfields |=
+			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+	}
+
+	/* Fill the parsing flags & params according to the requested offload */
+	if (xmit_type & XMIT_L4_CSUM) {
+		/* We don't re-calculate IP checksum as it is already done by
+		 * the upper stack
+		 */
+		first_bd->data.bd_flags.bitfields |=
+			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+		/* If the packet is IPv6 with extension header, indicate that
+		 * to FW and pass few params, since the device cracker doesn't
+		 * support parsing IPv6 with extension header/s.
+		 */
+		if (unlikely(ipv6_ext))
+			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+	}
+
+	if (xmit_type & XMIT_LSO) {
+		first_bd->data.bd_flags.bitfields |=
+			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+		third_bd->data.lso_mss =
+			cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+		first_bd->data.bd_flags.bitfields |=
+		1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+		hlen = skb_transport_header(skb) +
+		       tcp_hdrlen(skb) - skb->data;
+
+		/* @@@TBD - if this will not be removed, need to check */
+		third_bd->data.bitfields |=
+			(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+		/* Make life easier for FW guys who can't deal with header and
+		 * data on same BD. If we need to split, use the second bd...
+		 */
+		if (unlikely(skb_headlen(skb) > hlen)) {
+			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+				   "TSO split header size is %d (%x:%x)\n",
+				   first_bd->nbytes, first_bd->addr.hi,
+				   first_bd->addr.lo);
+
+			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+					   le32_to_cpu(first_bd->addr.lo)) +
+					   hlen;
+
+			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+					      le16_to_cpu(first_bd->nbytes) -
+					      hlen);
+
+			/* this marks the BD as one that has no
+			 * individual mapping
+			 */
+			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+			first_bd->nbytes = cpu_to_le16(hlen);
+
+			tx_data_bd = (struct eth_tx_bd *)third_bd;
+			data_split = true;
+		}
+	}
+
+	/* Handle fragmented skb: special handling for frags that land
+	 * inside the 2nd and 3rd BDs.
+	 */
+	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+		rc = map_frag_to_bd(edev,
+				    &skb_shinfo(skb)->frags[frag_idx],
+				    tx_data_bd);
+		if (rc) {
+			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+						data_split);
+			return NETDEV_TX_OK;
+		}
+
+		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+			tx_data_bd = (struct eth_tx_bd *)third_bd;
+		else
+			tx_data_bd = NULL;
+
+		frag_idx++;
+	}
+
+	/* map last frags into 4th, 5th .... */
+	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+		tx_data_bd = (struct eth_tx_bd *)
+			     qed_chain_produce(&txq->tx_pbl);
+
+		memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+		rc = map_frag_to_bd(edev,
+				    &skb_shinfo(skb)->frags[frag_idx],
+				    tx_data_bd);
+		if (rc) {
+			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
+						data_split);
+			return NETDEV_TX_OK;
+		}
+	}
+
+	/* update the first BD with the actual num BDs */
+	first_bd->data.nbds = nbd;
+
+	netdev_tx_sent_queue(netdev_txq, skb->len);
+
+	skb_tx_timestamp(skb);
+
+	/* Advance the packet producer only now, once all mappings have
+	 * succeeded, since mapping of pages may fail.
+	 */
+	txq->sw_tx_prod++;
+
+	/* 'next page' entries are counted in the producer value */
+	txq->tx_db.data.bd_prod =
+		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+	/* wmb makes sure that the BD data is updated before updating the
+	 * producer, otherwise FW may read old data from the BDs.
+	 */
+	wmb();
+	barrier();
+	writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives at the device before
+	 * the queue lock is released and another start_xmit is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+
+	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+		      < (MAX_SKB_FRAGS + 1))) {
+		netif_tx_stop_queue(netdev_txq);
+		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+			   "Stop queue was called\n");
+		/* paired memory barrier is in qede_tx_int(), we have to keep
+		 * ordering of set_bit() in netif_tx_stop_queue() and read of
+		 * fp->bd_tx_cons
+		 */
+		smp_mb();
+
+		if (qed_chain_get_elem_left(&txq->tx_pbl)
+		     >= (MAX_SKB_FRAGS + 1) &&
+		    (edev->state == QEDE_STATE_OPEN)) {
+			netif_tx_wake_queue(netdev_txq);
+			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+				   "Wake queue was called\n");
+		}
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+	u16 hw_bd_cons;
+
+	/* Tell compiler that consumer and producer can change */
+	barrier();
+	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+		return 0;
+
+	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static int qede_tx_int(struct qede_dev *edev,
+		       struct qede_tx_queue *txq)
+{
+	struct netdev_queue *netdev_txq;
+	u16 hw_bd_cons;
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	int rc;
+
+	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+	barrier();
+
+	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+		int len = 0;
+
+		rc = qede_free_tx_pkt(edev, txq, &len);
+		if (rc) {
+			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+				  hw_bd_cons,
+				  qed_chain_get_cons_idx(&txq->tx_pbl));
+			break;
+		}
+
+		bytes_compl += len;
+		pkts_compl++;
+		txq->sw_tx_cons++;
+	}
+
+	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that
+	 * start_xmit() will miss it and cause the queue to be stopped
+	 * forever.
+	 * On the other hand we need an rmb() here to ensure the proper
+	 * ordering of bit testing in the following
+	 * netif_tx_queue_stopped(txq) call.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+		/* Taking tx_lock is needed to prevent re-enabling the queue
+		 * while it's empty. This could happen if rx_action() gets
+		 * suspended in qede_tx_int() after the condition before
+		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+		 *
+		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
+		 * sends some packets consuming the whole queue again->
+		 * stops the queue
+		 */
+
+		__netif_tx_lock(netdev_txq, smp_processor_id());
+
+		if ((netif_tx_queue_stopped(netdev_txq)) &&
+		    (edev->state == QEDE_STATE_OPEN) &&
+		    (qed_chain_get_elem_left(&txq->tx_pbl)
+		      >= (MAX_SKB_FRAGS + 1))) {
+			netif_tx_wake_queue(netdev_txq);
+			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+				   "Wake queue was called\n");
+		}
+
+		__netif_tx_unlock(netdev_txq);
+	}
+
+	return 0;
+}
+
+static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+	u16 hw_comp_cons, sw_comp_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+
+	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	return hw_comp_cons != sw_comp_cons;
+}
+
+static bool qede_has_tx_work(struct qede_fastpath *fp)
+{
+	u8 tc;
+
+	for (tc = 0; tc < fp->edev->num_tc; tc++)
+		if (qede_txq_has_work(&fp->txqs[tc]))
+			return true;
+	return false;
+}
+
+/* This function copies the Rx buffer from the CONS position to the PROD
+ * position, since we failed to allocate a new Rx buffer.
+ */
+static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
+{
+	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+	struct sw_rx_data *sw_rx_data_cons =
+		&rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+	struct sw_rx_data *sw_rx_data_prod =
+		&rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+	dma_unmap_addr_set(sw_rx_data_prod, mapping,
+			   dma_unmap_addr(sw_rx_data_cons, mapping));
+
+	sw_rx_data_prod->data = sw_rx_data_cons->data;
+	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));
+
+	rxq->sw_rx_cons++;
+	rxq->sw_rx_prod++;
+}
+
+static inline void qede_update_rx_prod(struct qede_dev *edev,
+				       struct qede_rx_queue *rxq)
+{
+	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+	struct eth_rx_prod_data rx_prods = {0};
+
+	/* Update producers */
+	rx_prods.bd_prod = cpu_to_le16(bd_prod);
+	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+	/* Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 */
+	wmb();
+
+	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+			(u32 *)&rx_prods);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write arrives at the device before
+	 * the napi lock is released and another qede_poll is called (possibly
+	 * on another CPU). Without this barrier, the next doorbell can bypass
+	 * this doorbell. This is applicable to IA64/Altix systems.
+	 */
+	mmiowb();
+}
+
+static u32 qede_get_rxhash(struct qede_dev *edev,
+			   u8 bitfields,
+			   __le32 rss_hash,
+			   enum pkt_hash_types *rxhash_type)
+{
+	enum rss_hash_type htype;
+
+	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+
+	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
+		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+				(htype == RSS_HASH_TYPE_IPV6)) ?
+				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+		return le32_to_cpu(rss_hash);
+	}
+	*rxhash_type = PKT_HASH_TYPE_NONE;
+	return 0;
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+	skb_checksum_none_assert(skb);
+
+	if (csum_flag & QEDE_CSUM_UNNECESSARY)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+				    struct qede_fastpath *fp,
+				    struct sk_buff *skb,
+				    u16 vlan_tag)
+{
+	if (vlan_tag)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       vlan_tag);
+
+	napi_gro_receive(&fp->napi, skb);
+}
+
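+/* Build a mask of the error bits relevant to this packet: the L4 checksum
+ * error bit only if HW actually calculated an L4 checksum, plus the IP
+ * header error bit. Any hit returns QEDE_CSUM_ERROR; otherwise report
+ * QEDE_CSUM_UNNECESSARY when HW validated the L4 checksum.
+ */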
+static u8 qede_check_csum(u16 flag)
+{
+	u16 csum_flag = 0;
+	u8 csum = 0;
+
+	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+		csum = QEDE_CSUM_UNNECESSARY;
+	}
+
+	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+	if (csum_flag & flag)
+		return QEDE_CSUM_ERROR;
+
+	return csum;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+	struct qede_dev *edev = fp->edev;
+	struct qede_rx_queue *rxq = fp->rxq;
+
+	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
+	int rx_pkt = 0;
+	u8 csum_flag;
+
+	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from speculatively reading the
+	 * CQE/BD inside the while-loop before reading hw_comp_cons. If the CQE
+	 * were read before FW writes it, and FW then writes the CQE and SB
+	 * before the CPU reads hw_comp_cons, the CPU would process a stale CQE.
+	 */
+	rmb();
+
+	/* Loop to complete all indicated BDs */
+	while (sw_comp_cons != hw_comp_cons) {
+		struct eth_fast_path_rx_reg_cqe *fp_cqe;
+		enum pkt_hash_types rxhash_type;
+		enum eth_rx_cqe_type cqe_type;
+		struct sw_rx_data *sw_rx_data;
+		union eth_rx_cqe *cqe;
+		struct sk_buff *skb;
+		u16 len, pad;
+		u32 rx_hash;
+		u8 *data;
+
+		/* Get the CQE from the completion ring */
+		cqe = (union eth_rx_cqe *)
+			qed_chain_consume(&rxq->rx_comp_ring);
+		cqe_type = cqe->fast_path_regular.type;
+
+		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+			edev->ops->eth_cqe_completion(
+					edev->cdev, fp->rss_id,
+					(struct eth_slow_path_rx_cqe *)cqe);
+			goto next_cqe;
+		}
+
+		/* Get the data from the SW ring */
+		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+		data = sw_rx_data->data;
+
+		fp_cqe = &cqe->fast_path_regular;
+		len =  le16_to_cpu(fp_cqe->pkt_len);
+		pad = fp_cqe->placement_offset;
+
+		/* For every Rx BD consumed, we allocate a new BD so the BD ring
+		 * is always with a fixed size. If allocation fails, we take the
+		 * consumed BD and return it to the ring in the PROD position.
+		 * The packet that was received on that BD will be dropped (and
+		 * not passed to the upper stack).
+		 */
+		if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
+			dma_unmap_single(&edev->pdev->dev,
+					 dma_unmap_addr(sw_rx_data, mapping),
+					 rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+			/* If this is an error packet then drop it */
+			parse_flag =
+			le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
+			csum_flag = qede_check_csum(parse_flag);
+			if (csum_flag == QEDE_CSUM_ERROR) {
+				DP_NOTICE(edev,
+					  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
+					  sw_comp_cons, parse_flag);
+				rxq->rx_hw_errors++;
+				kfree(data);
+				goto next_rx;
+			}
+
+			skb = build_skb(data, 0);
+
+			if (unlikely(!skb)) {
+				DP_NOTICE(edev,
+					  "build_skb failed, dropping incoming packet\n");
+				kfree(data);
+				rxq->rx_alloc_errors++;
+				goto next_rx;
+			}
+
+			skb_reserve(skb, pad);
+
+		} else {
+			DP_NOTICE(edev,
+				  "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
+			qede_reuse_rx_data(rxq);
+			rxq->rx_alloc_errors++;
+			goto next_cqe;
+		}
+
+		sw_rx_data->data = NULL;
+
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, edev->ndev);
+
+		rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
+					  fp_cqe->rss_hash,
+					  &rxhash_type);
+
+		skb_set_hash(skb, rx_hash, rxhash_type);
+
+		qede_set_skb_csum(skb, csum_flag);
+
+		skb_record_rx_queue(skb, fp->rss_id);
+
+		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+		qed_chain_consume(&rxq->rx_bd_ring);
+
+next_rx:
+		rxq->sw_rx_cons++;
+		rx_pkt++;
+
+next_cqe: /* don't consume bd rx buffer */
+		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+		/* CR TPA - revisit how to handle budget for TPA; perhaps
+		 * increase it on "end".
+		 */
+		if (rx_pkt == budget)
+			break;
+	} /* repeat while sw_comp_cons != hw_comp_cons... */
+
+	/* Update producers */
+	qede_update_rx_prod(edev, rxq);
+
+	return rx_pkt;
+}
+
+static int qede_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = 0;
+	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+						 napi);
+	struct qede_dev *edev = fp->edev;
+
+	while (1) {
+		u8 tc;
+
+		for (tc = 0; tc < edev->num_tc; tc++)
+			if (qede_txq_has_work(&fp->txqs[tc]))
+				qede_tx_int(edev, &fp->txqs[tc]);
+
+		if (qede_has_rx_work(fp->rxq)) {
+			work_done += qede_rx_int(fp, budget - work_done);
+
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
+
+		/* Fall out from the NAPI loop if needed */
+		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
+			qed_sb_update_sb_idx(fp->sb_info);
+			/* *_has_*_work() reads the status block,
+			 * thus we need to ensure that status block indices
+			 * have been actually read (qed_sb_update_sb_idx)
+			 * prior to this check (*_has_*_work) so that
+			 * we won't write the "newer" value of the status block
+			 * to HW (if there was a DMA right after
+			 * qede_has_rx_work and if there is no rmb, the memory
+			 * reading (qed_sb_update_sb_idx) may be postponed
+			 * to right before *_ack_sb). In this case there
+			 * will never be another interrupt until there is
+			 * another update of the status block, while there
+			 * is still unhandled work.
+			 */
+			rmb();
+
+			if (!(qede_has_rx_work(fp->rxq) ||
+			      qede_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Update and reenable interrupts */
+				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+					   1 /*update*/);
+				break;
+			}
+		}
+	}
+
+	return work_done;
+}
+
+static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+	struct qede_fastpath *fp = fp_cookie;
+
+	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+	napi_schedule_irqoff(&fp->napi);
+	return IRQ_HANDLED;
+}
+
+/* -------------------------------------------------------------------------
+ * END OF FAST-PATH
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+static int qede_set_mac_addr(struct net_device *ndev, void *p);
+static void qede_set_rx_mode(struct net_device *ndev);
+static void qede_config_rx_mode(struct net_device *ndev);
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+				 enum qed_filter_xcast_params_type opcode,
+				 unsigned char mac[ETH_ALEN])
+{
+	struct qed_filter_params filter_cmd;
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_UCAST;
+	filter_cmd.filter.ucast.type = opcode;
+	filter_cmd.filter.ucast.mac_valid = 1;
+	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+	return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+	struct qed_eth_stats stats;
+
+	edev->ops->get_vport_stats(edev->cdev, &stats);
+	edev->stats.no_buff_discards = stats.no_buff_discards;
+	edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+	edev->stats.mac_filter_discards = stats.mac_filter_discards;
+
+	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+	edev->stats.coalesced_events = stats.tpa_coalesced_events;
+	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+
+	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
+	edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
+	edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
+	edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
+	edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
+	edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
+	edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
+	edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
+	edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
+	edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
+	edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+	edev->stats.rx_crc_errors = stats.rx_crc_errors;
+	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
+	edev->stats.rx_pause_frames = stats.rx_pause_frames;
+	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
+	edev->stats.rx_align_errors = stats.rx_align_errors;
+	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
+	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
+	edev->stats.rx_jabbers = stats.rx_jabbers;
+	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
+	edev->stats.rx_fragments = stats.rx_fragments;
+	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
+	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
+	edev->stats.tx_128_to_255_byte_packets =
+				stats.tx_128_to_255_byte_packets;
+	edev->stats.tx_256_to_511_byte_packets =
+				stats.tx_256_to_511_byte_packets;
+	edev->stats.tx_512_to_1023_byte_packets =
+				stats.tx_512_to_1023_byte_packets;
+	edev->stats.tx_1024_to_1518_byte_packets =
+				stats.tx_1024_to_1518_byte_packets;
+	edev->stats.tx_1519_to_2047_byte_packets =
+				stats.tx_1519_to_2047_byte_packets;
+	edev->stats.tx_2048_to_4095_byte_packets =
+				stats.tx_2048_to_4095_byte_packets;
+	edev->stats.tx_4096_to_9216_byte_packets =
+				stats.tx_4096_to_9216_byte_packets;
+	edev->stats.tx_9217_to_16383_byte_packets =
+				stats.tx_9217_to_16383_byte_packets;
+	edev->stats.tx_pause_frames = stats.tx_pause_frames;
+	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+	edev->stats.tx_total_collisions = stats.tx_total_collisions;
+	edev->stats.brb_truncates = stats.brb_truncates;
+	edev->stats.brb_discards = stats.brb_discards;
+	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+}
+
+static struct rtnl_link_stats64 *qede_get_stats64(
+			    struct net_device *dev,
+			    struct rtnl_link_stats64 *stats)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+
+	qede_fill_by_demand_stats(edev);
+
+	stats->rx_packets = edev->stats.rx_ucast_pkts +
+			    edev->stats.rx_mcast_pkts +
+			    edev->stats.rx_bcast_pkts;
+	stats->tx_packets = edev->stats.tx_ucast_pkts +
+			    edev->stats.tx_mcast_pkts +
+			    edev->stats.tx_bcast_pkts;
+
+	stats->rx_bytes = edev->stats.rx_ucast_bytes +
+			  edev->stats.rx_mcast_bytes +
+			  edev->stats.rx_bcast_bytes;
+
+	stats->tx_bytes = edev->stats.tx_ucast_bytes +
+			  edev->stats.tx_mcast_bytes +
+			  edev->stats.tx_bcast_bytes;
+
+	stats->tx_errors = edev->stats.tx_err_drop_pkts;
+	stats->multicast = edev->stats.rx_mcast_pkts +
+			   edev->stats.rx_bcast_pkts;
+
+	stats->rx_fifo_errors = edev->stats.no_buff_discards;
+
+	stats->collisions = edev->stats.tx_total_collisions;
+	stats->rx_crc_errors = edev->stats.rx_crc_errors;
+	stats->rx_frame_errors = edev->stats.rx_align_errors;
+
+	return stats;
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+	.ndo_open = qede_open,
+	.ndo_stop = qede_close,
+	.ndo_start_xmit = qede_start_xmit,
+	.ndo_set_rx_mode = qede_set_rx_mode,
+	.ndo_set_mac_address = qede_set_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_change_mtu = qede_change_mtu,
+	.ndo_get_stats64 = qede_get_stats64,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+					    struct pci_dev *pdev,
+					    struct qed_dev_eth_info *info,
+					    u32 dp_module,
+					    u8 dp_level)
+{
+	struct net_device *ndev;
+	struct qede_dev *edev;
+
+	ndev = alloc_etherdev_mqs(sizeof(*edev),
+				  info->num_queues,
+				  info->num_queues);
+	if (!ndev) {
+		pr_err("etherdev allocation failed\n");
+		return NULL;
+	}
+
+	edev = netdev_priv(ndev);
+	edev->ndev = ndev;
+	edev->cdev = cdev;
+	edev->pdev = pdev;
+	edev->dp_module = dp_module;
+	edev->dp_level = dp_level;
+	edev->ops = qed_ops;
+	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+
+	DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	memset(&edev->stats, 0, sizeof(edev->stats));
+	memcpy(&edev->dev_info, info, sizeof(*info));
+
+	edev->num_tc = edev->dev_info.num_tc;
+
+	return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+	struct net_device *ndev = edev->ndev;
+	struct pci_dev *pdev = edev->pdev;
+	u32 hw_features;
+
+	pci_set_drvdata(pdev, ndev);
+
+	ndev->mem_start = edev->dev_info.common.pci_mem_start;
+	ndev->base_addr = ndev->mem_start;
+	ndev->mem_end = edev->dev_info.common.pci_mem_end;
+	ndev->irq = edev->dev_info.common.pci_irq;
+
+	ndev->watchdog_timeo = TX_TIMEOUT;
+
+	ndev->netdev_ops = &qede_netdev_ops;
+
+	qede_set_ethtool_ops(ndev);
+
+	/* user-changeable features */
+	hw_features = NETIF_F_GRO | NETIF_F_SG |
+		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		      NETIF_F_TSO | NETIF_F_TSO6;
+
+	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+			      NETIF_F_HIGHDMA;
+	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+			 NETIF_F_HW_VLAN_CTAG_TX;
+
+	ndev->hw_features = hw_features;
+
+	/* Set network device HW mac */
+	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+}
+
+/* This function converts from 32b param to two params of level and module
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow in low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
+ */
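+/* For example, assuming the QED_LOG_*_MASK values follow the bit layout
+ * described above: debug = 0x80000000 selects plain NOTICE level, while
+ * debug = 0x3 selects VERBOSE level with module bits 0 and 1 set.
+ */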
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+	*p_dp_level = QED_LEVEL_NOTICE;
+	*p_dp_module = 0;
+
+	if (debug & QED_LOG_VERBOSE_MASK) {
+		*p_dp_level = QED_LEVEL_VERBOSE;
+		*p_dp_module = (debug & 0x3FFFFFFF);
+	} else if (debug & QED_LOG_INFO_MASK) {
+		*p_dp_level = QED_LEVEL_INFO;
+	} else if (debug & QED_LOG_NOTICE_MASK) {
+		*p_dp_level = QED_LEVEL_NOTICE;
+	}
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+	if (edev->fp_array) {
+		struct qede_fastpath *fp;
+		int i;
+
+		for_each_rss(i) {
+			fp = &edev->fp_array[i];
+
+			kfree(fp->sb_info);
+			kfree(fp->rxq);
+			kfree(fp->txqs);
+		}
+		kfree(edev->fp_array);
+	}
+	edev->num_rss = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+	struct qede_fastpath *fp;
+	int i;
+
+	edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
+				 sizeof(*edev->fp_array), GFP_KERNEL);
+	if (!edev->fp_array) {
+		DP_NOTICE(edev, "fp array allocation failed\n");
+		goto err;
+	}
+
+	for_each_rss(i) {
+		fp = &edev->fp_array[i];
+
+		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+		if (!fp->sb_info) {
+			DP_NOTICE(edev, "sb info struct allocation failed\n");
+			goto err;
+		}
+
+		fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
+		if (!fp->rxq) {
+			DP_NOTICE(edev, "RXQ struct allocation failed\n");
+			goto err;
+		}
+
+		fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
+		if (!fp->txqs) {
+			DP_NOTICE(edev, "TXQ array allocation failed\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	qede_free_fp_array(edev);
+	return -ENOMEM;
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+	struct qede_dev *edev = container_of(work, struct qede_dev,
+					     sp_task.work);
+	mutex_lock(&edev->qede_lock);
+
+	if (edev->state == QEDE_STATE_OPEN) {
+		if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+			qede_config_rx_mode(edev->ndev);
+	}
+
+	mutex_unlock(&edev->qede_lock);
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+	struct qed_pf_params pf_params;
+
+	/* 16 rx + 16 tx */
+	memset(&pf_params, 0, sizeof(struct qed_pf_params));
+	pf_params.eth_pf_params.num_cons = 32;
+	qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+enum qede_probe_mode {
+	QEDE_PROBE_NORMAL,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+			enum qede_probe_mode mode)
+{
+	struct qed_slowpath_params params;
+	struct qed_dev_eth_info dev_info;
+	struct qede_dev *edev;
+	struct qed_dev *cdev;
+	int rc;
+
+	if (unlikely(dp_level & QED_LEVEL_INFO))
+		pr_notice("Starting qede probe\n");
+
+	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
+				      dp_module, dp_level);
+	if (!cdev) {
+		rc = -ENODEV;
+		goto err0;
+	}
+
+	qede_update_pf_params(cdev);
+
+	/* Start the Slowpath-process */
+	memset(&params, 0, sizeof(struct qed_slowpath_params));
+	params.int_mode = QED_INT_MODE_MSIX;
+	params.drv_major = QEDE_MAJOR_VERSION;
+	params.drv_minor = QEDE_MINOR_VERSION;
+	params.drv_rev = QEDE_REVISION_VERSION;
+	params.drv_eng = QEDE_ENGINEERING_VERSION;
+	strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+	rc = qed_ops->common->slowpath_start(cdev, &params);
+	if (rc) {
+		pr_notice("Cannot start slowpath\n");
+		goto err1;
+	}
+
+	/* Learn information crucial for qede to progress */
+	rc = qed_ops->fill_dev_info(cdev, &dev_info);
+	if (rc)
+		goto err2;
+
+	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+				   dp_level);
+	if (!edev) {
+		rc = -ENOMEM;
+		goto err2;
+	}
+
+	qede_init_ndev(edev);
+
+	rc = register_netdev(edev->ndev);
+	if (rc) {
+		DP_NOTICE(edev, "Cannot register net-device\n");
+		goto err3;
+	}
+
+	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+
+	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+	mutex_init(&edev->qede_lock);
+
+	DP_INFO(edev, "Ending successfully qede probe\n");
+
+	return 0;
+
+err3:
+	free_netdev(edev->ndev);
+err2:
+	qed_ops->common->slowpath_stop(cdev);
+err1:
+	qed_ops->common->remove(cdev);
+err0:
+	return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	u32 dp_module = 0;
+	u8 dp_level = 0;
+
+	qede_config_debug(debug, &dp_module, &dp_level);
+
+	return __qede_probe(pdev, dp_module, dp_level,
+			    QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+	QEDE_REMOVE_NORMAL,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+	struct net_device *ndev = pci_get_drvdata(pdev);
+	struct qede_dev *edev = netdev_priv(ndev);
+	struct qed_dev *cdev = edev->cdev;
+
+	DP_INFO(edev, "Starting qede_remove\n");
+
+	cancel_delayed_work_sync(&edev->sp_task);
+	unregister_netdev(ndev);
+
+	edev->ops->common->set_power_state(cdev, PCI_D0);
+
+	pci_set_drvdata(pdev, NULL);
+
+	free_netdev(ndev);
+
+	/* Use global ops since we've freed edev */
+	qed_ops->common->slowpath_stop(cdev);
+	qed_ops->common->remove(cdev);
+
+	pr_notice("Ending successfully qede_remove\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+	int rc;
+	u16 rss_num;
+
+	/* Set up queues according to possible resources */
+	rss_num = netif_get_num_default_rss_queues() *
+		  edev->dev_info.common.num_hwfns;
+
+	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+	if (rc > 0) {
+		/* Managed to request interrupts for our queues */
+		edev->num_rss = rc;
+		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+			QEDE_RSS_CNT(edev), rss_num);
+		rc = 0;
+	}
+	return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev,
+			     struct qed_sb_info *sb_info)
+{
+	if (sb_info->sb_virt)
+		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+				  (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+			     struct qed_sb_info *sb_info,
+			     u16 sb_id)
+{
+	struct status_block *sb_virt;
+	dma_addr_t sb_phys;
+	int rc;
+
+	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+				     sizeof(*sb_virt),
+				     &sb_phys, GFP_KERNEL);
+	if (!sb_virt) {
+		DP_ERR(edev, "Status block allocation failed\n");
+		return -ENOMEM;
+	}
+
+	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+					sb_virt, sb_phys, sb_id,
+					QED_SB_TYPE_L2_QUEUE);
+	if (rc) {
+		DP_ERR(edev, "Status block initialization failed\n");
+		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+				  sb_virt, sb_phys);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+				 struct qede_rx_queue *rxq)
+{
+	u16 i;
+
+	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+		struct sw_rx_data *rx_buf;
+		u8 *data;
+
+		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+		data = rx_buf->data;
+
+		dma_unmap_single(&edev->pdev->dev,
+				 dma_unmap_addr(rx_buf, mapping),
+				 rxq->rx_buf_size, DMA_FROM_DEVICE);
+
+		rx_buf->data = NULL;
+		kfree(data);
+	}
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev,
+			      struct qede_rx_queue *rxq)
+{
+	/* Free rx buffers */
+	qede_free_rx_buffers(edev, rxq);
+
+	/* Free the parallel SW ring */
+	kfree(rxq->sw_rx_ring);
+
+	/* Free the real Rx rings used by FW */
+	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static int qede_alloc_rx_buffer(struct qede_dev *edev,
+				struct qede_rx_queue *rxq)
+{
+	struct sw_rx_data *sw_rx_data;
+	struct eth_rx_bd *rx_bd;
+	dma_addr_t mapping;
+	u16 rx_buf_size;
+	u8 *data;
+
+	rx_buf_size = rxq->rx_buf_size;
+
+	data = kmalloc(rx_buf_size, GFP_ATOMIC);
+	if (unlikely(!data)) {
+		DP_NOTICE(edev, "Failed to allocate Rx data\n");
+		return -ENOMEM;
+	}
+
+	mapping = dma_map_single(&edev->pdev->dev, data,
+				 rx_buf_size, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+		kfree(data);
+		DP_NOTICE(edev, "Failed to map Rx buffer\n");
+		return -ENOMEM;
+	}
+
+	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+	sw_rx_data->data = data;
+
+	dma_unmap_addr_set(sw_rx_data, mapping, mapping);
+
+	/* Advance PROD and get BD pointer */
+	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+	WARN_ON(!rx_bd);
+	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+	rxq->sw_rx_prod++;
+
+	return 0;
+}
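
The sw_rx_prod and sw_rx_cons counters used above are free-running u16 values;
only the masked value indexes the ring. A minimal sketch of the idiom, assuming
NUM_RX_BDS_MAX is a power-of-two-minus-one value usable as a mask:

	/* Illustration only, not part of the patch */
	u16 prod = rxq->sw_rx_prod;	/* may wrap past 0xffff */
	struct sw_rx_data *slot = &rxq->sw_rx_ring[prod & NUM_RX_BDS_MAX];

	/* The difference of the free-running counters is the fill level
	 * and stays correct across u16 wrap-around:
	 */
	u16 fill = rxq->sw_rx_prod - rxq->sw_rx_cons;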
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev,
+			      struct qede_rx_queue *rxq)
+{
+	int i, rc, size, num_allocated;
+
+	rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
+	rxq->rx_buf_size = NET_IP_ALIGN +
+			   ETH_OVERHEAD +
+			   edev->ndev->mtu +
+			   QEDE_FW_RX_ALIGN_END;
+
+	/* Allocate the parallel driver ring for Rx buffers */
+	size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
+	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+	if (!rxq->sw_rx_ring) {
+		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		goto err;
+	}
+
+	/* Allocate FW Rx ring  */
+	rc = edev->ops->common->chain_alloc(edev->cdev,
+					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    QED_CHAIN_MODE_NEXT_PTR,
+					    NUM_RX_BDS_MAX,
+					    sizeof(struct eth_rx_bd),
+					    &rxq->rx_bd_ring);
+
+	if (rc)
+		goto err;
+
+	/* Allocate FW completion ring */
+	rc = edev->ops->common->chain_alloc(edev->cdev,
+					    QED_CHAIN_USE_TO_CONSUME,
+					    QED_CHAIN_MODE_PBL,
+					    NUM_RX_BDS_MAX,
+					    sizeof(union eth_rx_cqe),
+					    &rxq->rx_comp_ring);
+	if (rc)
+		goto err;
+
+	/* Allocate buffers for the Rx ring */
+	for (i = 0; i < rxq->num_rx_buffers; i++) {
+		rc = qede_alloc_rx_buffer(edev, rxq);
+		if (rc)
+			break;
+	}
+	num_allocated = i;
+	if (!num_allocated) {
+		DP_ERR(edev, "Rx buffers allocation failed\n");
+		goto err;
+	} else if (num_allocated < rxq->num_rx_buffers) {
+		DP_NOTICE(edev,
+			  "Allocated less buffers than desired (%d allocated)\n",
+			  num_allocated);
+	}
+
+	return 0;
+
+err:
+	qede_free_mem_rxq(edev, rxq);
+	return -ENOMEM;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev,
+			      struct qede_tx_queue *txq)
+{
+	/* Free the parallel SW ring */
+	kfree(txq->sw_tx_ring);
+
+	/* Free the real Tx ring used by FW */
+	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev,
+			      struct qede_tx_queue *txq)
+{
+	int size, rc;
+	union eth_tx_bd_types *p_virt;
+
+	txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+	/* Allocate the parallel driver ring for Tx buffers */
+	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
+	if (!txq->sw_tx_ring) {
+		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
+		goto err;
+	}
+
+	rc = edev->ops->common->chain_alloc(edev->cdev,
+					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+					    QED_CHAIN_MODE_PBL,
+					    NUM_TX_BDS_MAX,
+					    sizeof(*p_virt),
+					    &txq->tx_pbl);
+	if (rc)
+		goto err;
+
+	return 0;
+
+err:
+	qede_free_mem_txq(edev, txq);
+	return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev,
+			     struct qede_fastpath *fp)
+{
+	int tc;
+
+	qede_free_mem_sb(edev, fp->sb_info);
+
+	qede_free_mem_rxq(edev, fp->rxq);
+
+	for (tc = 0; tc < edev->num_tc; tc++)
+		qede_free_mem_txq(edev, &fp->txqs[tc]);
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * which contains a status block, one Rx queue and multiple per-TC Tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev,
+			     struct qede_fastpath *fp)
+{
+	int rc, tc;
+
+	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
+	if (rc)
+		goto err;
+
+	rc = qede_alloc_mem_rxq(edev, fp->rxq);
+	if (rc)
+		goto err;
+
+	for (tc = 0; tc < edev->num_tc; tc++) {
+		rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+
+err:
+	qede_free_mem_fp(edev, fp);
+	return -ENOMEM;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+	int i;
+
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &edev->fp_array[i];
+
+		qede_free_mem_fp(edev, fp);
+	}
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+	int rc = 0, rss_id;
+
+	for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
+		struct qede_fastpath *fp = &edev->fp_array[rss_id];
+
+		rc = qede_alloc_mem_fp(edev, fp);
+		if (rc)
+			break;
+	}
+
+	if (rss_id != QEDE_RSS_CNT(edev)) {
+		/* Failed allocating memory for all the queues */
+		if (!rss_id) {
+			DP_ERR(edev,
+			       "Failed to allocate memory for the leading queue\n");
+			rc = -ENOMEM;
+		} else {
+			DP_NOTICE(edev,
+				  "Failed to allocate memory for all RSS queues\nDesired: %d queues, allocated: %d queues\n",
+				  QEDE_RSS_CNT(edev), rss_id);
+			/* Proceed with the queues that did get allocated */
+			rc = 0;
+		}
+		edev->num_rss = rss_id;
+	}
+
+	return rc;
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+	int rss_id, txq_index, tc;
+	struct qede_fastpath *fp;
+
+	for_each_rss(rss_id) {
+		fp = &edev->fp_array[rss_id];
+
+		fp->edev = edev;
+		fp->rss_id = rss_id;
+
+		memset((void *)&fp->napi, 0, sizeof(fp->napi));
+
+		memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+
+		memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
+		fp->rxq->rxq_id = rss_id;
+
+		memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
+		for (tc = 0; tc < edev->num_tc; tc++) {
+			txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
+			fp->txqs[tc].index = txq_index;
+		}
+
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 edev->ndev->name, rss_id);
+	}
+}
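
As a worked example of the txq_index assignment above: with
QEDE_RSS_CNT(edev) == 4 and edev->num_tc == 2, the Tx queues are laid out
TC-major:

	/* txq_index = tc * QEDE_RSS_CNT(edev) + rss_id
	 *
	 * rss_id:	0  1  2  3
	 * tc 0    ->	0  1  2  3
	 * tc 1    ->	4  5  6  7
	 */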
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+	int rc = 0;
+
+	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
+	if (rc) {
+		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+		return rc;
+	}
+	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
+	if (rc) {
+		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+	int i;
+
+	for_each_rss(i) {
+		napi_disable(&edev->fp_array[i].napi);
+
+		netif_napi_del(&edev->fp_array[i].napi);
+	}
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_rss(i) {
+		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
+			       qede_poll, NAPI_POLL_WEIGHT);
+		napi_enable(&edev->fp_array[i].napi);
+	}
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+	int i;
+
+	for (i = 0; i < edev->int_info.used_cnt; i++) {
+		if (edev->int_info.msix_cnt) {
+			synchronize_irq(edev->int_info.msix[i].vector);
+			free_irq(edev->int_info.msix[i].vector,
+				 &edev->fp_array[i]);
+		} else {
+			edev->ops->common->simd_handler_clean(edev->cdev, i);
+		}
+	}
+
+	edev->int_info.used_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+	int i, rc;
+
+	/* Sanity-check that the number of prepared RSS queues does not
+	 * exceed the number of available MSI-X vectors.
+	 */
+	if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
+		DP_ERR(edev,
+		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+		       QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
+		rc = request_irq(edev->int_info.msix[i].vector,
+				 qede_msix_fp_int, 0, edev->fp_array[i].name,
+				 &edev->fp_array[i]);
+		if (rc) {
+			DP_ERR(edev, "Request fp %d irq failed\n", i);
+			qede_sync_free_irqs(edev);
+			return rc;
+		}
+		DP_VERBOSE(edev, NETIF_MSG_INTR,
+			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+			   edev->fp_array[i].name, i,
+			   &edev->fp_array[i]);
+		edev->int_info.used_cnt++;
+	}
+
+	return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+	napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+	int i, rc = 0;
+
+	/* Learn Interrupt configuration */
+	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+	if (rc)
+		return rc;
+
+	if (edev->int_info.msix_cnt) {
+		rc = qede_req_msix_irqs(edev);
+		if (rc)
+			return rc;
+		edev->ndev->irq = edev->int_info.msix[0].vector;
+	} else {
+		const struct qed_common_ops *ops;
+
+		/* qed should learn the RSS ids and callbacks */
+		ops = edev->ops->common;
+		for (i = 0; i < QEDE_RSS_CNT(edev); i++)
+			ops->simd_handler_config(edev->cdev,
+						 &edev->fp_array[i], i,
+						 qede_simd_fp_handler);
+		edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
+	}
+	return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+			  struct qede_tx_queue *txq,
+			  bool allow_drain)
+{
+	int rc, cnt = 1000;
+
+	while (txq->sw_tx_cons != txq->sw_tx_prod) {
+		if (!cnt) {
+			if (allow_drain) {
+				DP_NOTICE(edev,
+					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
+					  txq->index);
+				rc = edev->ops->common->drain(edev->cdev);
+				if (rc)
+					return rc;
+				return qede_drain_txq(edev, txq, false);
+			}
+			DP_NOTICE(edev,
+				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+				  txq->index, txq->sw_tx_prod,
+				  txq->sw_tx_cons);
+			return -ENODEV;
+		}
+		cnt--;
+		usleep_range(1000, 2000);
+		barrier();
+	}
+
+	/* FW finished processing, wait for HW to transmit all tx packets */
+	usleep_range(1000, 2000);
+
+	return 0;
+}
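
The drain above is a bounded poll: roughly one to two seconds (1000 iterations
of usleep_range(1000, 2000)) before escalating to a management-firmware (MCP)
drain, after which it retries exactly once with allow_drain = false so the
recursion cannot loop:

	/* Timeline sketch:
	 *   poll up to ~1-2s  -> queue empty: done
	 *   still stuck       -> edev->ops->common->drain() via MCP
	 *   retry (no drain)  -> one more ~1-2s poll, then -ENODEV
	 */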
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+	struct qed_update_vport_params vport_update_params;
+	struct qed_dev *cdev = edev->cdev;
+	int rc, tc, i;
+
+	/* Disable the vport */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = 0;
+	vport_update_params.update_vport_active_flg = 1;
+	vport_update_params.vport_active_flg = 0;
+	vport_update_params.update_rss_flg = 0;
+
+	rc = edev->ops->vport_update(cdev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Failed to update vport\n");
+		return rc;
+	}
+
+	/* Flush Tx queues. If needed, request drain from MCP */
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &edev->fp_array[i];
+
+		for (tc = 0; tc < edev->num_tc; tc++) {
+			struct qede_tx_queue *txq = &fp->txqs[tc];
+
+			rc = qede_drain_txq(edev, txq, true);
+			if (rc)
+				return rc;
+		}
+	}
+
+	/* Stop all queues in reverse order */
+	for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
+		struct qed_stop_rxq_params rx_params;
+
+		/* Stop the Tx queue(s) */
+		for (tc = 0; tc < edev->num_tc; tc++) {
+			struct qed_stop_txq_params tx_params;
+
+			tx_params.rss_id = i;
+			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;
+			rc = edev->ops->q_tx_stop(cdev, &tx_params);
+			if (rc) {
+				DP_ERR(edev, "Failed to stop TXQ #%d\n",
+				       tx_params.tx_queue_id);
+				return rc;
+			}
+		}
+
+		/* Stop the Rx queue */
+		memset(&rx_params, 0, sizeof(rx_params));
+		rx_params.rss_id = i;
+		rx_params.rx_queue_id = i;
+
+		rc = edev->ops->q_rx_stop(cdev, &rx_params);
+		if (rc) {
+			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+			return rc;
+		}
+	}
+
+	/* Stop the vport */
+	rc = edev->ops->vport_stop(cdev, 0);
+	if (rc)
+		DP_ERR(edev, "Failed to stop VPORT\n");
+
+	return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev)
+{
+	int rc, tc, i;
+	int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1;
+	struct qed_dev *cdev = edev->cdev;
+	struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
+	struct qed_update_vport_params vport_update_params;
+	struct qed_queue_start_common_params q_params;
+
+	if (!edev->num_rss) {
+		DP_ERR(edev,
+		       "Cannot update V-VPORT as active as there are no Rx queues\n");
+		return -EINVAL;
+	}
+
+	rc = edev->ops->vport_start(cdev, vport_id,
+				    edev->ndev->mtu,
+				    drop_ttl0_flg,
+				    vlan_removal_en);
+
+	if (rc) {
+		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	DP_VERBOSE(edev, NETIF_MSG_IFUP,
+		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+		   vport_id, edev->ndev->mtu + ETH_HLEN, vlan_removal_en);
+
+	for_each_rss(i) {
+		struct qede_fastpath *fp = &edev->fp_array[i];
+		dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;
+
+		memset(&q_params, 0, sizeof(q_params));
+		q_params.rss_id = i;
+		q_params.queue_id = i;
+		q_params.vport_id = 0;
+		q_params.sb = fp->sb_info->igu_sb_id;
+		q_params.sb_idx = RX_PI;
+
+		rc = edev->ops->q_rx_start(cdev, &q_params,
+					   fp->rxq->rx_buf_size,
+					   fp->rxq->rx_bd_ring.p_phys_addr,
+					   phys_table,
+					   fp->rxq->rx_comp_ring.page_cnt,
+					   &fp->rxq->hw_rxq_prod_addr);
+		if (rc) {
+			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
+			return rc;
+		}
+
+		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+
+		qede_update_rx_prod(edev, fp->rxq);
+
+		for (tc = 0; tc < edev->num_tc; tc++) {
+			struct qede_tx_queue *txq = &fp->txqs[tc];
+			int txq_index = tc * QEDE_RSS_CNT(edev) + i;
+
+			memset(&q_params, 0, sizeof(q_params));
+			q_params.rss_id = i;
+			q_params.queue_id = txq_index;
+			q_params.vport_id = 0;
+			q_params.sb = fp->sb_info->igu_sb_id;
+			q_params.sb_idx = TX_PI(tc);
+
+			rc = edev->ops->q_tx_start(cdev, &q_params,
+						   txq->tx_pbl.pbl.p_phys_table,
+						   txq->tx_pbl.page_cnt,
+						   &txq->doorbell_addr);
+			if (rc) {
+				DP_ERR(edev, "Start TXQ #%d failed %d\n",
+				       txq_index, rc);
+				return rc;
+			}
+
+			txq->hw_cons_ptr =
+				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
+			SET_FIELD(txq->tx_db.data.params,
+				  ETH_DB_DATA_DEST, DB_DEST_XCM);
+			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+				  DB_AGG_CMD_SET);
+			SET_FIELD(txq->tx_db.data.params,
+				  ETH_DB_DATA_AGG_VAL_SEL,
+				  DQ_XCM_ETH_TX_BD_PROD_CMD);
+
+			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+		}
+	}
+
+	/* Prepare and send the vport enable */
+	memset(&vport_update_params, 0, sizeof(vport_update_params));
+	vport_update_params.vport_id = vport_id;
+	vport_update_params.update_vport_active_flg = 1;
+	vport_update_params.vport_active_flg = 1;
+
+	/* Fill struct with RSS params */
+	if (QEDE_RSS_CNT(edev) > 1) {
+		vport_update_params.update_rss_flg = 1;
+		for (i = 0; i < 128; i++)
+			rss_params->rss_ind_table[i] =
+			ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
+		netdev_rss_key_fill(rss_params->rss_key,
+				    sizeof(rss_params->rss_key));
+	} else {
+		memset(rss_params, 0, sizeof(*rss_params));
+	}
+	memcpy(&vport_update_params.rss_params, rss_params,
+	       sizeof(*rss_params));
+
+	rc = edev->ops->vport_update(cdev, &vport_update_params);
+	if (rc) {
+		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
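
The indirection-table fill above uses ethtool_rxfh_indir_default(), which
spreads entries round-robin (index % n_rx_rings). For example, with 4 RSS
queues the 128-entry table becomes:

	/* rss_ind_table[i] = ethtool_rxfh_indir_default(i, 4) = i % 4:
	 * { 0, 1, 2, 3, 0, 1, 2, 3, ... }	(128 entries in total)
	 */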
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+				 enum qed_filter_xcast_params_type opcode,
+				 unsigned char *mac, int num_macs)
+{
+	struct qed_filter_params filter_cmd;
+	int i;
+
+	memset(&filter_cmd, 0, sizeof(filter_cmd));
+	filter_cmd.type = QED_FILTER_TYPE_MCAST;
+	filter_cmd.filter.mcast.type = opcode;
+	filter_cmd.filter.mcast.num = num_macs;
+
+	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+	return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+enum qede_unload_mode {
+	QEDE_UNLOAD_NORMAL,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+{
+	struct qed_link_params link_params;
+	int rc;
+
+	DP_INFO(edev, "Starting qede unload\n");
+
+	mutex_lock(&edev->qede_lock);
+	edev->state = QEDE_STATE_CLOSED;
+
+	/* Close OS Tx */
+	netif_tx_disable(edev->ndev);
+	netif_carrier_off(edev->ndev);
+
+	/* Reset the link */
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = false;
+	edev->ops->common->set_link(edev->cdev, &link_params);
+	rc = qede_stop_queues(edev);
+	if (rc) {
+		qede_sync_free_irqs(edev);
+		goto out;
+	}
+
+	DP_INFO(edev, "Stopped Queues\n");
+
+	edev->ops->fastpath_stop(edev->cdev);
+
+	/* Release the interrupts */
+	qede_sync_free_irqs(edev);
+	edev->ops->common->set_fp_int(edev->cdev, 0);
+
+	qede_napi_disable_remove(edev);
+
+	qede_free_mem_load(edev);
+	qede_free_fp_array(edev);
+
+out:
+	mutex_unlock(&edev->qede_lock);
+	DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+	QEDE_LOAD_NORMAL,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+{
+	struct qed_link_params link_params;
+	struct qed_link_output link_output;
+	int rc;
+
+	DP_INFO(edev, "Starting qede load\n");
+
+	rc = qede_set_num_queues(edev);
+	if (rc)
+		goto err0;
+
+	rc = qede_alloc_fp_array(edev);
+	if (rc)
+		goto err0;
+
+	qede_init_fp(edev);
+
+	rc = qede_alloc_mem_load(edev);
+	if (rc)
+		goto err1;
+	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
+		QEDE_RSS_CNT(edev), edev->num_tc);
+
+	rc = qede_set_real_num_queues(edev);
+	if (rc)
+		goto err2;
+
+	qede_napi_add_enable(edev);
+	DP_INFO(edev, "Napi added and enabled\n");
+
+	rc = qede_setup_irqs(edev);
+	if (rc)
+		goto err3;
+	DP_INFO(edev, "Setup IRQs succeeded\n");
+
+	rc = qede_start_queues(edev);
+	if (rc)
+		goto err4;
+	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+	/* Add primary mac and set Rx filters */
+	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
+
+	mutex_lock(&edev->qede_lock);
+	edev->state = QEDE_STATE_OPEN;
+	mutex_unlock(&edev->qede_lock);
+
+	/* Ask for link-up using current configuration */
+	memset(&link_params, 0, sizeof(link_params));
+	link_params.link_up = true;
+	edev->ops->common->set_link(edev->cdev, &link_params);
+
+	/* Query whether link is already-up */
+	memset(&link_output, 0, sizeof(link_output));
+	edev->ops->common->get_link(edev->cdev, &link_output);
+	qede_link_update(edev, &link_output);
+
+	DP_INFO(edev, "Ending successfully qede load\n");
+
+	return 0;
+
+err4:
+	qede_sync_free_irqs(edev);
+	memset(&edev->int_info, 0, sizeof(edev->int_info));
+err3:
+	qede_napi_disable_remove(edev);
+err2:
+	qede_free_mem_load(edev);
+err1:
+	edev->ops->common->set_fp_int(edev->cdev, 0);
+	qede_free_fp_array(edev);
+	edev->num_rss = 0;
+err0:
+	return rc;
+}
+
+void qede_reload(struct qede_dev *edev,
+		 void (*func)(struct qede_dev *, union qede_reload_args *),
+		 union qede_reload_args *args)
+{
+	qede_unload(edev, QEDE_UNLOAD_NORMAL);
+	/* Call function handler to update parameters
+	 * needed for function load.
+	 */
+	if (func)
+		func(edev, args);
+
+	qede_load(edev, QEDE_LOAD_NORMAL);
+
+	mutex_lock(&edev->qede_lock);
+	qede_config_rx_mode(edev->ndev);
+	mutex_unlock(&edev->qede_lock);
+}
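
qede_reload() is the hook a future configuration change (e.g. an MTU or
ring-size change) would be expected to drive. A minimal sketch of such a
caller, assuming union qede_reload_args grows a hypothetical 'mtu' member:

	static void qede_update_mtu(struct qede_dev *edev,
				    union qede_reload_args *args)
	{
		/* Runs between unload and load, so the new value takes
		 * effect when rx_buf_size is recomputed.
		 */
		edev->ndev->mtu = args->mtu;	/* 'mtu' member assumed */
	}

	static int qede_change_mtu(struct net_device *ndev, int new_mtu)
	{
		struct qede_dev *edev = netdev_priv(ndev);
		union qede_reload_args args;

		args.mtu = new_mtu;
		qede_reload(edev, qede_update_mtu, &args);
		return 0;
	}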
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	netif_carrier_off(ndev);
+
+	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+	return qede_load(edev, QEDE_LOAD_NORMAL);
+}
+
+static int qede_close(struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	qede_unload(edev, QEDE_UNLOAD_NORMAL);
+
+	return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+	struct qede_dev *edev = dev;
+
+	if (!netif_running(edev->ndev)) {
+		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+		return;
+	}
+
+	if (link->link_up) {
+		DP_NOTICE(edev, "Link is up\n");
+		netif_tx_start_all_queues(edev->ndev);
+		netif_carrier_on(edev->ndev);
+	} else {
+		DP_NOTICE(edev, "Link is down\n");
+		netif_tx_disable(edev->ndev);
+		netif_carrier_off(edev->ndev);
+	}
+}
+
+static int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	struct sockaddr *addr = p;
+	int rc;
+
+	ASSERT_RTNL(); /* @@@TBD To be removed */
+
+	DP_INFO(edev, "Set_mac_addr called\n");
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		DP_NOTICE(edev, "The MAC address is not valid\n");
+		return -EFAULT;
+	}
+
+	ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+	if (!netif_running(ndev)) {
+		DP_NOTICE(edev, "The device is currently down\n");
+		return 0;
+	}
+
+	/* Remove the previous primary mac */
+	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+				   edev->primary_mac);
+	if (rc)
+		return rc;
+
+	/* Add MAC filter according to the new unicast HW MAC address */
+	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+				      edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+			       enum qed_filter_rx_mode_type *accept_flags)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+	unsigned char *mc_macs, *temp;
+	struct netdev_hw_addr *ha;
+	int rc = 0, mc_count;
+	size_t size;
+
+	size = 64 * ETH_ALEN;
+
+	mc_macs = kzalloc(size, GFP_KERNEL);
+	if (!mc_macs) {
+		DP_NOTICE(edev,
+			  "Failed to allocate memory for multicast MACs\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	temp = mc_macs;
+
+	/* Remove all previously configured MAC filters */
+	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+				   mc_macs, 1);
+	if (rc)
+		goto exit;
+
+	netif_addr_lock_bh(ndev);
+
+	mc_count = netdev_mc_count(ndev);
+	if (mc_count <= 64) {
+		netdev_for_each_mc_addr(ha, ndev) {
+			ether_addr_copy(temp, ha->addr);
+			temp += ETH_ALEN;
+		}
+	}
+
+	netif_addr_unlock_bh(ndev);
+
+	/* Check for all multicast @@@TBD resource allocation */
+	if ((ndev->flags & IFF_ALLMULTI) ||
+	    (mc_count > 64)) {
+		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+	} else {
+		/* Add all multicast MAC filters */
+		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+					   mc_macs, mc_count);
+	}
+
+exit:
+	kfree(mc_macs);
+	return rc;
+}
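
To summarize the decision above: the device holds at most 64 exact-match
multicast filters, so anything beyond that (or IFF_ALLMULTI) falls back to
multicast-promiscuous mode:

	/* mc_count <= 64 && !IFF_ALLMULTI -> exact-match multicast filters
	 * mc_count  > 64 ||  IFF_ALLMULTI -> QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
	 */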
+
+static void qede_set_rx_mode(struct net_device *ndev)
+{
+	struct qede_dev *edev = netdev_priv(ndev);
+
+	DP_INFO(edev, "qede_set_rx_mode called\n");
+
+	if (edev->state != QEDE_STATE_OPEN) {
+		DP_INFO(edev,
+			"qede_set_rx_mode called while interface is down\n");
+	} else {
+		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+		schedule_delayed_work(&edev->sp_task, 0);
+	}
+}
+
+/* Must be called with qede_lock held */
+static void qede_config_rx_mode(struct net_device *ndev)
+{
+	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+	struct qede_dev *edev = netdev_priv(ndev);
+	struct qed_filter_params rx_mode;
+	unsigned char *uc_macs, *temp;
+	struct netdev_hw_addr *ha;
+	int rc, uc_count;
+	size_t size;
+
+	netif_addr_lock_bh(ndev);
+
+	uc_count = netdev_uc_count(ndev);
+	size = uc_count * ETH_ALEN;
+
+	uc_macs = kzalloc(size, GFP_ATOMIC);
+	if (!uc_macs) {
+		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+		netif_addr_unlock_bh(ndev);
+		return;
+	}
+
+	temp = uc_macs;
+	netdev_for_each_uc_addr(ha, ndev) {
+		ether_addr_copy(temp, ha->addr);
+		temp += ETH_ALEN;
+	}
+
+	netif_addr_unlock_bh(ndev);
+
+	/* Configure the struct for the Rx mode */
+	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+	/* Remove all previous unicast secondary macs and multicast macs
+	 * (configure / leave the primary mac)
+	 */
+	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+				   edev->primary_mac);
+	if (rc)
+		goto out;
+
+	/* Check for promiscuous */
+	if ((ndev->flags & IFF_PROMISC) ||
+	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+	} else {
+		/* Add MAC filters according to the unicast secondary macs */
+		int i;
+
+		temp = uc_macs;
+		for (i = 0; i < uc_count; i++) {
+			rc = qede_set_ucast_rx_mac(edev,
+						   QED_FILTER_XCAST_TYPE_ADD,
+						   temp);
+			if (rc)
+				goto out;
+
+			temp += ETH_ALEN;
+		}
+
+		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+		if (rc)
+			goto out;
+	}
+
+	rx_mode.filter.accept_flags = accept_flags;
+	edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+	kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 4847713..b09a6b8 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1736,8 +1736,6 @@
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 static u32 ql_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index d6696cf..46bbea8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1092,7 +1092,7 @@
 struct qlcnic_mailbox {
 	struct workqueue_struct	*work_q;
 	struct qlcnic_adapter	*adapter;
-	struct qlcnic_mbx_ops	*ops;
+	const struct qlcnic_mbx_ops *ops;
 	struct work_struct	work;
 	struct completion	completion;
 	struct list_head	cmd_q;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9f0bdd9..37a731b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4048,7 +4048,7 @@
 	struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
 						  work);
 	struct qlcnic_adapter *adapter = mbx->adapter;
-	struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+	const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
 	struct device *dev = &adapter->pdev->dev;
 	atomic_t *rsp_status = &mbx->rsp_status;
 	struct list_head *head = &mbx->cmd_q;
@@ -4098,7 +4098,7 @@
 	}
 }
 
-static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
 	.enqueue_cmd    = qlcnic_83xx_enqueue_mbx_cmd,
 	.dequeue_cmd    = qlcnic_83xx_dequeue_mbx_cmd,
 	.decode_resp    = qlcnic_83xx_decode_mbx_rsp,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index c3c514e..5dade1f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -415,13 +415,6 @@
 		 (qdev->fw_rev_id & 0x000000ff));
 	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = 0;
-	drvinfo->testinfo_len = 0;
-	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
-		drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
-	else
-		drvinfo->regdump_len = sizeof(struct ql_reg_dump);
-	drvinfo->eedump_len = 0;
 }
 
 static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 78bb4ce..ef668d3 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2388,7 +2388,6 @@
 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
-	info->regdump_len = tp->regs_len;
 }
 
 static int rtl8139_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c..b4f2123 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@
 {
 	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
-	u16 rg_saw_cnt;
+	int rg_saw_cnt;
 	u32 data;
 	static const struct ephy_info e_info_8168h_1[] = {
 		{ 0x1e, 0x0800,	0x0001 },
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index a157aaa..0623fff9 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -766,6 +766,11 @@
 	struct ravb_ptp_perout perout[N_PER_OUT];
 };
 
+enum ravb_chip_id {
+	RCAR_GEN2,
+	RCAR_GEN3,
+};
+
 struct ravb_private {
 	struct net_device *ndev;
 	struct platform_device *pdev;
@@ -806,6 +811,8 @@
 	int msg_enable;
 	int speed;
 	int duplex;
+	int emac_irq;
+	enum ravb_chip_id chip_id;
 
 	unsigned no_avb_link:1;
 	unsigned avb_link_active_low:1;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 450899e..8cc5ec5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -201,7 +201,7 @@
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
-		dma_free_coherent(NULL, ring_size, priv->rx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
 				  priv->rx_desc_dma[q]);
 		priv->rx_ring[q] = NULL;
 	}
@@ -209,7 +209,7 @@
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
+		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
@@ -240,13 +240,13 @@
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
+		dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
 		/* We just set the data size to 0 for a failed mapping which
 		 * should prevent DMA from happening...
 		 */
-		if (dma_mapping_error(&ndev->dev, dma_addr))
+		if (dma_mapping_error(ndev->dev.parent, dma_addr))
 			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
@@ -309,7 +309,7 @@
 
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
-	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->rx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->rx_ring[q])
@@ -320,7 +320,7 @@
 	/* Allocate all TX descriptors. */
 	ring_size = sizeof(struct ravb_tx_desc) *
 		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
-	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
+	priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
 	if (!priv->tx_ring[q])
@@ -443,7 +443,7 @@
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
 		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
 			/* Last packet descriptor? */
 			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
@@ -546,7 +546,7 @@
 
 			skb = priv->rx_skb[q][entry];
 			priv->rx_skb[q][entry] = NULL;
-			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 					 ALIGN(PKT_BUF_SZ, 16),
 					 DMA_FROM_DEVICE);
 			get_ts &= (q == RAVB_NC) ?
@@ -586,14 +586,14 @@
 			if (!skb)
 				break;	/* Better luck next round. */
 			ravb_set_buffer_align(skb);
-			dma_addr = dma_map_single(&ndev->dev, skb->data,
+			dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 						  le16_to_cpu(desc->ds_cc),
 						  DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
 			/* We just set the data size to 0 for a failed mapping
 			 * which should prevent DMA  from happening...
 			 */
-			if (dma_mapping_error(&ndev->dev, dma_addr))
+			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
@@ -889,6 +889,22 @@
 		return -ENOENT;
 	}
 
+	/* This driver only supports 10/100Mbit speeds on Gen3
+	 * at this time.
+	 */
+	if (priv->chip_id == RCAR_GEN3) {
+		int err;
+
+		err = phy_set_max_speed(phydev, SPEED_100);
+		if (err) {
+			netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
+			phy_disconnect(phydev);
+			return err;
+		}
+
+		netdev_info(ndev, "limited PHY to 100Mbit/s\n");
+	}
+
 	netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
 		    phydev->addr, phydev->irq, phydev->drv->name);
 
@@ -1197,6 +1213,15 @@
 		goto out_napi_off;
 	}
 
+	if (priv->chip_id == RCAR_GEN3) {
+		error = request_irq(priv->emac_irq, ravb_interrupt,
+				    IRQF_SHARED, ndev->name, ndev);
+		if (error) {
+			netdev_err(ndev, "cannot request IRQ\n");
+			goto out_free_irq;
+		}
+	}
+
 	/* Device init */
 	error = ravb_dmac_init(ndev);
 	if (error)
@@ -1220,6 +1245,7 @@
 	ravb_ptp_stop(ndev);
 out_free_irq:
 	free_irq(ndev->irq, ndev);
+	free_irq(priv->emac_irq, ndev);
 out_napi_off:
 	napi_disable(&priv->napi[RAVB_NC]);
 	napi_disable(&priv->napi[RAVB_BE]);
@@ -1300,8 +1326,8 @@
 		 entry / NUM_TX_DESC * DPTR_ALIGN;
 	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
 	memcpy(buffer, skb->data, len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto drop;
 
 	desc = &priv->tx_ring[q][entry];
@@ -1310,8 +1336,8 @@
 
 	buffer = skb->data + len;
 	len = skb->len - len;
-	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, dma_addr))
+	dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, dma_addr))
 		goto unmap;
 
 	desc++;
@@ -1323,7 +1349,7 @@
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
 			desc--;
-			dma_unmap_single(&ndev->dev, dma_addr, len,
+			dma_unmap_single(ndev->dev.parent, dma_addr, len,
 					 DMA_TO_DEVICE);
 			goto unmap;
 		}
@@ -1358,7 +1384,7 @@
 	return NETDEV_TX_OK;
 
 unmap:
-	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+	dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
 			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
@@ -1625,10 +1651,20 @@
 	return 0;
 }
 
+static const struct of_device_id ravb_match_table[] = {
+	{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
+	{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
+	{ .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ravb_match_table);
+
 static int ravb_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *match;
 	struct ravb_private *priv;
+	enum ravb_chip_id chip_id;
 	struct net_device *ndev;
 	int error, irq, q;
 	struct resource *res;
@@ -1657,7 +1693,14 @@
 	/* The Ether-specific entries in the device structure. */
 	ndev->base_addr = res->start;
 	ndev->dma = -1;
-	irq = platform_get_irq(pdev, 0);
+
+	match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
+	chip_id = (enum ravb_chip_id)match->data;
+
+	if (chip_id == RCAR_GEN3)
+		irq = platform_get_irq_byname(pdev, "ch22");
+	else
+		irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		error = irq;
 		goto out_release;
@@ -1688,6 +1731,17 @@
 	priv->avb_link_active_low =
 		of_property_read_bool(np, "renesas,ether-link-active-low");
 
+	if (chip_id == RCAR_GEN3) {
+		irq = platform_get_irq_byname(pdev, "ch24");
+		if (irq < 0) {
+			error = irq;
+			goto out_release;
+		}
+		priv->emac_irq = irq;
+	}
+
+	priv->chip_id = chip_id;
+
 	/* Set function */
 	ndev->netdev_ops = &ravb_netdev_ops;
 	ndev->ethtool_ops = &ravb_ethtool_ops;
@@ -1708,7 +1762,7 @@
 
 	/* Allocate descriptor base address table */
 	priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
-	priv->desc_bat = dma_alloc_coherent(NULL, priv->desc_bat_size,
+	priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
 					    &priv->desc_bat_dma, GFP_KERNEL);
 	if (!priv->desc_bat) {
 		dev_err(&ndev->dev,
@@ -1763,7 +1817,7 @@
 	netif_napi_del(&priv->napi[RAVB_BE]);
 	ravb_mdio_release(priv);
 out_dma_free:
-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 out_release:
 	if (ndev)
@@ -1779,7 +1833,7 @@
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct ravb_private *priv = netdev_priv(ndev);
 
-	dma_free_coherent(NULL, priv->desc_bat_size, priv->desc_bat,
+	dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
 			  priv->desc_bat_dma);
 	/* Set reset mode */
 	ravb_write(ndev, CCC_OPC_RESET, CCC);
@@ -1818,13 +1872,6 @@
 #define RAVB_PM_OPS NULL
 #endif
 
-static const struct of_device_id ravb_match_table[] = {
-	{ .compatible = "renesas,etheravb-r8a7790" },
-	{ .compatible = "renesas,etheravb-r8a7794" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ravb_match_table);
-
 static struct platform_driver ravb_driver = {
 	.probe		= ravb_probe,
 	.remove		= ravb_remove,
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index d3f6632..32a80d2 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -3672,7 +3672,7 @@
 	    rocker_port->stp_state == BR_STATE_FORWARDING)
 		return 0;
 
-	flags |= ROCKER_OP_FLAG_REMOVE;
+	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
 
 	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
 
@@ -4329,11 +4329,11 @@
 	const struct rocker *rocker = rocker_port->rocker;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(rocker->hw.id);
 		memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
 		break;
-	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 		attr->u.brport_flags = rocker_port->brport_flags;
 		break;
 	default:
@@ -4361,23 +4361,38 @@
 	return err;
 }
 
+static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
+					  struct switchdev_trans *trans,
+					  u32 ageing_time)
+{
+	if (!switchdev_trans_ph_prepare(trans)) {
+		rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
+		mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
+	}
+
+	return 0;
+}
+
 static int rocker_port_attr_set(struct net_device *dev,
-				struct switchdev_attr *attr,
+				const struct switchdev_attr *attr,
 				struct switchdev_trans *trans)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	int err = 0;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_STP_STATE:
-		err = rocker_port_stp_update(rocker_port, trans,
-					     ROCKER_OP_FLAG_NOWAIT,
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		err = rocker_port_stp_update(rocker_port, trans, 0,
 					     attr->u.stp_state);
 		break;
-	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
 		err = rocker_port_brport_flags_set(rocker_port, trans,
 						   attr->u.brport_flags);
 		break;
+	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+		err = rocker_port_bridge_ageing_time(rocker_port, trans,
+						     attr->u.ageing_time);
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
@@ -4408,7 +4423,7 @@
 
 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
 				 struct switchdev_trans *trans,
-				 const struct switchdev_obj_vlan *vlan)
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	u16 vid;
 	int err;
@@ -4425,7 +4440,7 @@
 
 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
 			       struct switchdev_trans *trans,
-			       const struct switchdev_obj_fdb *fdb)
+			       const struct switchdev_obj_port_fdb *fdb)
 {
 	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
 	int flags = 0;
@@ -4437,7 +4452,7 @@
 }
 
 static int rocker_port_obj_add(struct net_device *dev,
-			       struct switchdev_obj *obj,
+			       const struct switchdev_obj *obj,
 			       struct switchdev_trans *trans)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
@@ -4445,18 +4460,19 @@
 	int err = 0;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_VLAN:
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 		err = rocker_port_vlans_add(rocker_port, trans,
-					    &obj->u.vlan);
+					    SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
-	case SWITCHDEV_OBJ_IPV4_FIB:
-		fib4 = &obj->u.ipv4_fib;
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
 		err = rocker_port_fib_ipv4(rocker_port, trans,
 					   htonl(fib4->dst), fib4->dst_len,
-					   fib4->fi, fib4->tb_id, 0);
+					   &fib4->fi, fib4->tb_id, 0);
 		break;
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_add(rocker_port, trans, &obj->u.fdb);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_add(rocker_port, trans,
+					  SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -4481,7 +4497,7 @@
 }
 
 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
-				 const struct switchdev_obj_vlan *vlan)
+				 const struct switchdev_obj_port_vlan *vlan)
 {
 	u16 vid;
 	int err;
@@ -4497,10 +4513,10 @@
 
 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
 			       struct switchdev_trans *trans,
-			       const struct switchdev_obj_fdb *fdb)
+			       const struct switchdev_obj_port_fdb *fdb)
 {
 	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
-	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+	int flags = ROCKER_OP_FLAG_REMOVE;
 
 	if (!rocker_port_is_bridged(rocker_port))
 		return -EINVAL;
@@ -4509,25 +4525,27 @@
 }
 
 static int rocker_port_obj_del(struct net_device *dev,
-			       struct switchdev_obj *obj)
+			       const struct switchdev_obj *obj)
 {
 	struct rocker_port *rocker_port = netdev_priv(dev);
 	const struct switchdev_obj_ipv4_fib *fib4;
 	int err = 0;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = rocker_port_vlans_del(rocker_port,
+					    SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
-	case SWITCHDEV_OBJ_IPV4_FIB:
-		fib4 = &obj->u.ipv4_fib;
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
 		err = rocker_port_fib_ipv4(rocker_port, NULL,
 					   htonl(fib4->dst), fib4->dst_len,
-					   fib4->fi, fib4->tb_id,
+					   &fib4->fi, fib4->tb_id,
 					   ROCKER_OP_FLAG_REMOVE);
 		break;
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_del(rocker_port, NULL, &obj->u.fdb);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_del(rocker_port, NULL,
+					  SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -4538,10 +4556,10 @@
 }
 
 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
-				struct switchdev_obj *obj)
+				struct switchdev_obj_port_fdb *fdb,
+				switchdev_obj_dump_cb_t *cb)
 {
 	struct rocker *rocker = rocker_port->rocker;
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct rocker_fdb_tbl_entry *found;
 	struct hlist_node *tmp;
 	unsigned long lock_flags;
@@ -4552,11 +4570,11 @@
 	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
 		if (found->key.rocker_port != rocker_port)
 			continue;
-		fdb->addr = found->key.addr;
+		ether_addr_copy(fdb->addr, found->key.addr);
 		fdb->ndm_state = NUD_REACHABLE;
 		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
 						   found->key.vlan_id);
-		err = obj->cb(rocker_port->dev, obj);
+		err = cb(&fdb->obj);
 		if (err)
 			break;
 	}
@@ -4566,9 +4584,9 @@
 }
 
 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
-				 struct switchdev_obj *obj)
+				 struct switchdev_obj_port_vlan *vlan,
+				 switchdev_obj_dump_cb_t *cb)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	u16 vid;
 	int err = 0;
 
@@ -4579,7 +4597,7 @@
 		if (rocker_vlan_id_is_internal(htons(vid)))
 			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
 		vlan->vid_begin = vlan->vid_end = vid;
-		err = obj->cb(rocker_port->dev, obj);
+		err = cb(&vlan->obj);
 		if (err)
 			break;
 	}
@@ -4588,17 +4606,20 @@
 }
 
 static int rocker_port_obj_dump(struct net_device *dev,
-				struct switchdev_obj *obj)
+				struct switchdev_obj *obj,
+				switchdev_obj_dump_cb_t *cb)
 {
 	const struct rocker_port *rocker_port = netdev_priv(dev);
 	int err = 0;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = rocker_port_fdb_dump(rocker_port, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = rocker_port_fdb_dump(rocker_port,
+					   SWITCHDEV_OBJ_PORT_FDB(obj), cb);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = rocker_port_vlan_dump(rocker_port, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = rocker_port_vlan_dump(rocker_port,
+					    SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
 		break;
 	default:
 		err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ff649eb..78b7b7b 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1604,6 +1604,22 @@
 	memcpy(outbuf, pdu + offset, outlen);
 }
 
+static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
+{
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+	/* All our allocations have been reset */
+	efx_ef10_reset_mc_allocations(efx);
+
+	/* The datapath firmware might have been changed */
+	nic_data->must_check_datapath_caps = true;
+
+	/* MAC statistics have been cleared on the NIC; clear the local
+	 * statistic that we update with efx_update_diff_stat().
+	 */
+	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+}
+
 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -1623,17 +1639,7 @@
 		return 0;
 
 	nic_data->warm_boot_count = rc;
-
-	/* All our allocations have been reset */
-	efx_ef10_reset_mc_allocations(efx);
-
-	/* The datapath firmware might have been changed */
-	nic_data->must_check_datapath_caps = true;
-
-	/* MAC statistics have been cleared on the NIC; clear the local
-	 * statistic that we update with efx_update_diff_stat().
-	 */
-	nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
+	efx_ef10_mcdi_reboot_detected(efx);
 
 	return -EIO;
 }
@@ -4670,6 +4676,7 @@
 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
 	.mcdi_read_response = efx_ef10_mcdi_read_response,
 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
 	.irq_enable_master = efx_port_dummy_op_void,
 	.irq_test_generate = efx_ef10_irq_test_generate,
 	.irq_disable_non_ev = efx_port_dummy_op_void,
@@ -4774,6 +4781,7 @@
 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
 	.mcdi_read_response = efx_ef10_mcdi_read_response,
 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+	.mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
 	.irq_enable_master = efx_port_dummy_op_void,
 	.irq_test_generate = efx_ef10_irq_test_generate,
 	.irq_disable_non_ev = efx_port_dummy_op_void,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 974637d..6e11ee6 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2062,7 +2062,7 @@
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
 	napi_hash_add(&channel->napi_str);
-	efx_channel_init_lock(channel);
+	efx_channel_busy_poll_init(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -2125,7 +2125,7 @@
 	if (!netif_running(efx->net_dev))
 		return LL_FLUSH_FAILED;
 
-	if (!efx_channel_lock_poll(channel))
+	if (!efx_channel_try_lock_poll(channel))
 		return LL_FLUSH_BUSY;
 
 	old_rx_packets = channel->rx_queue.rx_packets;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 98d172b..d3f307e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1028,10 +1028,21 @@
 
 		/* Consume the status word since efx_mcdi_rpc_finish() won't */
 		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
-			if (efx_mcdi_poll_reboot(efx))
+			rc = efx_mcdi_poll_reboot(efx);
+			if (rc)
 				break;
 			udelay(MCDI_STATUS_DELAY_US);
 		}
+
+		/* On EF10, a CODE_MC_REBOOT event can be received without the
+		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
+		 * If zero was returned from the final call to
+		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+		 * MC has definitely rebooted so prepare for the reset.
+		 */
+		if (!rc && efx->type->mcdi_reboot_detected)
+			efx->type->mcdi_reboot_detected(efx);
+
 		mcdi->new_epoch = true;
 
 		/* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index c530e1c..229e68c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -431,21 +431,8 @@
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-	spinlock_t state_lock;
-#define EFX_CHANNEL_STATE_IDLE		0
-#define EFX_CHANNEL_STATE_NAPI		(1 << 0)  /* NAPI owns this channel */
-#define EFX_CHANNEL_STATE_POLL		(1 << 1)  /* poll owns this channel */
-#define EFX_CHANNEL_STATE_DISABLED	(1 << 2)  /* channel is disabled */
-#define EFX_CHANNEL_STATE_NAPI_YIELD	(1 << 3)  /* NAPI yielded this channel */
-#define EFX_CHANNEL_STATE_POLL_YIELD	(1 << 4)  /* poll yielded this channel */
-#define EFX_CHANNEL_OWNED \
-	(EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
-#define EFX_CHANNEL_LOCKED \
-	(EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
-#define EFX_CHANNEL_USER_PEND \
-	(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	unsigned long busy_poll_state;
+#endif
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
@@ -480,98 +467,94 @@
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+enum efx_channel_busy_poll_state {
+	EFX_CHANNEL_STATE_IDLE = 0,
+	EFX_CHANNEL_STATE_NAPI = BIT(0),
+	EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
+	EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
+	EFX_CHANNEL_STATE_POLL_BIT = 2,
+	EFX_CHANNEL_STATE_POLL = BIT(2),
+	EFX_CHANNEL_STATE_DISABLE_BIT = 3,
+};
+
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
-	spin_lock_init(&channel->state_lock);
+	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from the device poll routine to get ownership of a channel. */
 static inline bool efx_channel_lock_napi(struct efx_channel *channel)
 {
-	bool rc = true;
+	unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
 
-	spin_lock_bh(&channel->state_lock);
-	if (channel->state & EFX_CHANNEL_LOCKED) {
-		WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-		channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		/* we don't care if someone yielded */
-		channel->state = EFX_CHANNEL_STATE_NAPI;
+	while (1) {
+		switch (old) {
+		case EFX_CHANNEL_STATE_POLL:
+			/* Ensure efx_channel_try_lock_poll() won't starve us */
+			set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
+				&channel->busy_poll_state);
+			/* fallthrough */
+		case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&channel->busy_poll_state, old,
+			       EFX_CHANNEL_STATE_NAPI);
+		if (unlikely(prev != old)) {
+			/* This is likely to mean we've just entered polling
+			 * state. Go back round to set the REQ bit.
+			 */
+			old = prev;
+			continue;
+		}
+		return true;
 	}
-	spin_unlock_bh(&channel->state_lock);
-	return rc;
 }
 
 static inline void efx_channel_unlock_napi(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	WARN_ON(channel->state &
-		(EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
-
-	channel->state &= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
+	/* Make sure the write from efx_channel_lock_napi() has completed */
+	smp_wmb();
+	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
 }
 
 /* Called from efx_busy_poll(). */
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
-	bool rc = true;
-
-	spin_lock_bh(&channel->state_lock);
-	if ((channel->state & EFX_CHANNEL_LOCKED)) {
-		channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		/* preserve yield marks */
-		channel->state |= EFX_CHANNEL_STATE_POLL;
-	}
-	spin_unlock_bh(&channel->state_lock);
-	return rc;
+	return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
+			EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
 }
 
-/* Returns true if NAPI tried to get the channel while it was locked. */
 static inline void efx_channel_unlock_poll(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
-
-	/* will reset state to idle, unless channel is disabled */
-	channel->state &= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
+	clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
-/* True if a socket is polling, even if it did not get the lock. */
 static inline bool efx_channel_busy_polling(struct efx_channel *channel)
 {
-	WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
-	return channel->state & EFX_CHANNEL_USER_PEND;
+	return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
 }
 
 static inline void efx_channel_enable(struct efx_channel *channel)
 {
-	spin_lock_bh(&channel->state_lock);
-	channel->state = EFX_CHANNEL_STATE_IDLE;
-	spin_unlock_bh(&channel->state_lock);
+	clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
+			 &channel->busy_poll_state);
 }
 
-/* False if the channel is currently owned. */
+/* Stop further polling or NAPI access.
+ * Returns false if the channel is currently busy polling.
+ */
 static inline bool efx_channel_disable(struct efx_channel *channel)
 {
-	bool rc = true;
-
-	spin_lock_bh(&channel->state_lock);
-	if (channel->state & EFX_CHANNEL_OWNED)
-		rc = false;
-	channel->state |= EFX_CHANNEL_STATE_DISABLED;
-	spin_unlock_bh(&channel->state_lock);
-
-	return rc;
+	set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
+	/* Implicit barrier in efx_channel_busy_polling() */
+	return !efx_channel_busy_polling(channel);
 }
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 
-static inline void efx_channel_init_lock(struct efx_channel *channel)
+static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
 {
 }
 
@@ -584,7 +567,7 @@
 {
 }
 
-static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
 {
 	return false;
 }
@@ -1277,6 +1260,7 @@
 	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
 				   size_t pdu_offset, size_t pdu_len);
 	int (*mcdi_poll_reboot)(struct efx_nic *efx);
+	void (*mcdi_reboot_detected)(struct efx_nic *efx);
 	void (*irq_enable_master)(struct efx_nic *efx);
 	void (*irq_test_generate)(struct efx_nic *efx);
 	void (*irq_disable_non_ev)(struct efx_nic *efx);
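The spinlock-protected state word removed above is replaced by a single
busy_poll_state manipulated with cmpxchg() and the bit operations. A
userspace sketch of the same ownership handoff, with C11 atomics standing
in for the kernel primitives (names and simplifications are illustrative,
not the driver's API):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { ST_IDLE = 0, ST_NAPI = 1 << 0, ST_POLL = 1 << 2 };

    static _Atomic unsigned long busy_poll_state = ST_IDLE;

    /* cmpxchg(&state, IDLE, POLL) == IDLE: take the lock only if idle. */
    static bool try_lock_poll(void)
    {
    	unsigned long expected = ST_IDLE;

    	return atomic_compare_exchange_strong(&busy_poll_state,
    					      &expected, ST_POLL);
    }

    /* clear_bit_unlock() analogue: drop the POLL bit with release order. */
    static void unlock_poll(void)
    {
    	atomic_fetch_and_explicit(&busy_poll_state,
    				  ~(unsigned long)ST_POLL,
    				  memory_order_release);
    }
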
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 934143e..64d8aa4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -424,7 +424,7 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 	struct hwtstamp_config config;
-	struct timespec now;
+	struct timespec64 now;
 	u64 temp = 0;
 	u32 ptp_v2 = 0;
 	u32 tstamp_all = 0;
@@ -621,8 +621,10 @@
 					     priv->default_addend);
 
 		/* initialize system time */
-		getnstimeofday(&now);
-		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+		ktime_get_real_ts64(&now);
+
+		/* lower 32 bits of tv_sec are safe until y2106 */
+		priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
 					    now.tv_nsec);
 	}
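The (u32) cast above is safe because the hardware takes an unsigned 32-bit
seconds value, which wraps in 2106, unlike a signed 32-bit time_t that
overflows in 2038. A small standalone check of the arithmetic behind the
comment:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	const uint64_t secs_per_year = 365ULL * 24 * 3600;

    	/* unsigned 32-bit seconds: ~136 years past 1970 -> ~2106 */
    	printf("u32 wrap: 1970 + %llu years\n",
    	       (unsigned long long)(UINT32_MAX / secs_per_year));
    	/* signed 32-bit seconds: ~68 years past 1970 -> 2038 */
    	printf("s32 wrap: 1970 + %llu years\n",
    	       (unsigned long long)(INT32_MAX / secs_per_year));
    	return 0;
    }
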
 
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6ce9731..062bce9 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4529,9 +4529,6 @@
 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 	strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
-	info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
-		cp->casreg_len : CAS_MAX_REGS;
-	info->n_stats = CAS_NUM_STAT_KEYS;
 }
 
 static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
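This and several similar hunks below (tehuti, cpmac, cpsw, tlan, xilinx,
fjes) drop drvinfo field assignments because the ethtool core already
derives those fields from the driver's ops. A rough sketch of that core
fallback, with simplified stand-in types for net_device and ethtool_ops:

    /* Simplified stand-ins for struct net_device / struct ethtool_ops. */
    struct dev;

    struct ops {
    	int (*get_regs_len)(struct dev *dev);
    	int (*get_sset_count)(struct dev *dev, int sset);
    };

    #define ETH_SS_STATS 1	/* stringset id for statistics */

    static void fill_drvinfo(const struct ops *ops, struct dev *dev,
    			 unsigned int *regdump_len, unsigned int *n_stats)
    {
    	/* The core fills these from the ops when present, making the
    	 * per-driver assignments deleted above redundant.
    	 */
    	if (ops->get_regs_len)
    		*regdump_len = ops->get_regs_len(dev);
    	if (ops->get_sset_count)
    		*n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
    }
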
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index a9cac84..14c9d1b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2182,11 +2182,6 @@
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
 		sizeof(drvinfo->bus_info));
-
-	drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
-	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 /*
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index cba3d9f..77d26fe 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -899,7 +899,6 @@
 	strlcpy(info->driver, "cpmac", sizeof(info->driver));
 	strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
 	snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
-	info->regdump_len = 0;
 }
 
 static const struct ethtool_ops cpmac_ethtool_ops = {
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 0ea78326..e9cc61e 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -2,6 +2,8 @@
  *
  * Copyright (C) 2013 Texas Instruments
  *
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * version 2 as published by the Free Software Foundation.
@@ -13,7 +15,7 @@
  */
 
 #include <linux/platform_device.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/of.h>
@@ -173,7 +175,6 @@
 	},
 	{}
 };
-MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
 
 static int cpsw_phy_sel_probe(struct platform_device *pdev)
 {
@@ -214,7 +215,4 @@
 		.of_match_table = cpsw_phy_sel_id_table,
 	},
 };
-
-module_platform_driver(cpsw_phy_sel_driver);
-MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(cpsw_phy_sel_driver);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 75584cc..040fbc1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -31,6 +31,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/gpio.h>
 #include <linux/of.h>
+#include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
 #include <linux/if_vlan.h>
@@ -366,6 +367,7 @@
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
+	struct device_node		*phy_node;
 	struct napi_struct		napi_rx;
 	struct napi_struct		napi_tx;
 	struct device			*dev;
@@ -1146,7 +1148,11 @@
 		cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
 				   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-	slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+	if (priv->phy_node)
+		slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+				 &cpsw_adjust_link, 0, slave->data->phy_if);
+	else
+		slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
 				 &cpsw_adjust_link, slave->data->phy_if);
 	if (IS_ERR(slave->phy)) {
 		dev_err(priv->dev, "phy %s not found on slave %d\n",
@@ -1784,7 +1790,6 @@
 	strlcpy(info->driver, "cpsw", sizeof(info->driver));
 	strlcpy(info->version, "1.0", sizeof(info->version));
 	strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
-	info->regdump_len = cpsw_get_regs_len(ndev);
 }
 
 static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1935,11 +1940,12 @@
 	slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_platform_data *data,
+static int cpsw_probe_dt(struct cpsw_priv *priv,
 			 struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct device_node *slave_node;
+	struct cpsw_platform_data *data = &priv->data;
 	int i = 0, ret;
 	u32 prop;
 
@@ -2030,6 +2036,7 @@
 		if (strcmp(slave_node->name, "slave"))
 			continue;
 
+		priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
 		parp = of_get_property(slave_node, "phy_id", &lenp);
 		if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
 			dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
@@ -2045,7 +2052,6 @@
 		}
 		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
 			 PHY_ID_FMT, mdio->name, phyid);
-
 		slave_data->phy_if = of_get_phy_mode(slave_node);
 		if (slave_data->phy_if < 0) {
 			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2246,7 +2252,7 @@
 	/* Select default pin state */
 	pinctrl_pm_select_default_state(&pdev->dev);
 
-	if (cpsw_probe_dt(&priv->data, pdev)) {
+	if (cpsw_probe_dt(priv, pdev)) {
 		dev_err(&pdev->dev, "cpsw: platform data missing\n");
 		ret = -ENODEV;
 		goto clean_runtime_disable_ret;
@@ -2584,17 +2590,7 @@
 	.remove = cpsw_remove,
 };
 
-static int __init cpsw_init(void)
-{
-	return platform_driver_register(&cpsw_driver);
-}
-late_initcall(cpsw_init);
-
-static void __exit cpsw_exit(void)
-{
-	platform_driver_unregister(&cpsw_driver);
-}
-module_exit(cpsw_exit);
+module_platform_driver(cpsw_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 691ec93..a274cd4 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -791,7 +791,6 @@
 			sizeof(info->bus_info));
 	else
 		strlcpy(info->bus_info, "EISA",	sizeof(info->bus_info));
-	info->eedump_len = TLAN_EEPROM_SIZE;
 }
 
 static int tlan_get_eeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index a832637..2b7550c 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2134,10 +2134,11 @@
 			}
 
 			skb_put(skb, pkt_len);
-			skb->protocol = eth_type_trans(skb, dev);
 
 			rhine_rx_vlan_tag(skb, desc, data_size);
 
+			skb->protocol = eth_type_trans(skb, dev);
+
 			netif_receive_skb(skb);
 
 			u64_stats_update_begin(&rp->rx_stats.syncp);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index d95f9aa..4684644 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1135,7 +1135,6 @@
 {
 	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
 	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
-	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
 }
 
 /**
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
index 0119dd1..9c218e1 100644
--- a/drivers/net/fjes/fjes_ethtool.c
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -105,8 +105,6 @@
 	strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
 	snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
 		 "platform:%s", plat_dev->name);
-	drvinfo->regdump_len = 0;
-	drvinfo->eedump_len = 0;
 }
 
 static int fjes_get_settings(struct net_device *netdev,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8f5c02e..de5c30c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -46,16 +46,27 @@
 
 static int geneve_net_id;
 
+union geneve_addr {
+	struct sockaddr_in sin;
+	struct sockaddr_in6 sin6;
+	struct sockaddr sa;
+};
+
+static union geneve_addr geneve_remote_unspec = { .sa.sa_family = AF_UNSPEC, };
+
 /* Pseudo network device */
 struct geneve_dev {
 	struct hlist_node  hlist;	/* vni hash table */
 	struct net	   *net;	/* netns for packet i/o */
 	struct net_device  *dev;	/* netdev for geneve tunnel */
-	struct geneve_sock *sock;	/* socket used for geneve tunnel */
+	struct geneve_sock *sock4;	/* IPv4 socket used for geneve tunnel */
+#if IS_ENABLED(CONFIG_IPV6)
+	struct geneve_sock *sock6;	/* IPv6 socket used for geneve tunnel */
+#endif
 	u8                 vni[3];	/* virtual network ID for tunnel */
 	u8                 ttl;		/* TTL override */
 	u8                 tos;		/* TOS override */
-	struct sockaddr_in remote;	/* IPv4 address for link partner */
+	union geneve_addr  remote;	/* IP address for link partner */
 	struct list_head   next;	/* geneve's per namespace list */
 	__be16		   dst_port;
 	bool		   collect_md;
@@ -103,12 +114,32 @@
 	vni_list_head = &gs->vni_list[hash];
 	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
 		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
-		    addr == geneve->remote.sin_addr.s_addr)
+		    addr == geneve->remote.sin.sin_addr.s_addr)
 			return geneve;
 	}
 	return NULL;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
+					 struct in6_addr addr6, u8 vni[])
+{
+	struct hlist_head *vni_list_head;
+	struct geneve_dev *geneve;
+	__u32 hash;
+
+	/* Find the device for this VNI */
+	hash = geneve_net_vni_hash(vni);
+	vni_list_head = &gs->vni_list[hash];
+	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
+		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+		    ipv6_addr_equal(&addr6, &geneve->remote.sin6.sin6_addr))
+			return geneve;
+	}
+	return NULL;
+}
+#endif
+
 static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
 {
 	return (struct genevehdr *)(udp_hdr(skb) + 1);
@@ -121,24 +152,49 @@
 	struct metadata_dst *tun_dst = NULL;
 	struct geneve_dev *geneve = NULL;
 	struct pcpu_sw_netstats *stats;
-	struct iphdr *iph;
-	u8 *vni;
+	struct iphdr *iph = NULL;
 	__be32 addr;
-	int err;
+	static u8 zero_vni[3];
+	u8 *vni;
+	int err = 0;
+	sa_family_t sa_family;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct ipv6hdr *ip6h = NULL;
+	struct in6_addr addr6;
+	static struct in6_addr zero_addr6;
+#endif
 
-	iph = ip_hdr(skb); /* outer IP header... */
+	sa_family = gs->sock->sk->sk_family;
 
-	if (gs->collect_md) {
-		static u8 zero_vni[3];
+	if (sa_family == AF_INET) {
+		iph = ip_hdr(skb); /* outer IP header... */
 
-		vni = zero_vni;
-		addr = 0;
-	} else {
-		vni = gnvh->vni;
-		addr = iph->saddr;
+		if (gs->collect_md) {
+			vni = zero_vni;
+			addr = 0;
+		} else {
+			vni = gnvh->vni;
+
+			addr = iph->saddr;
+		}
+
+		geneve = geneve_lookup(gs, addr, vni);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (sa_family == AF_INET6) {
+		ip6h = ipv6_hdr(skb); /* outer IPv6 header... */
+
+		if (gs->collect_md) {
+			vni = zero_vni;
+			addr6 = zero_addr6;
+		} else {
+			vni = gnvh->vni;
+
+			addr6 = ip6h->saddr;
+		}
+
+		geneve = geneve6_lookup(gs, addr6, vni);
+#endif
 	}
-
-	geneve = geneve_lookup(gs, addr, vni);
 	if (!geneve)
 		goto drop;
 
@@ -149,7 +205,7 @@
 			(gnvh->oam ? TUNNEL_OAM : 0) |
 			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
 
-		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
+		tun_dst = udp_tun_rx_dst(skb, sa_family, flags,
 					 vni_to_tunnel_id(gnvh->vni),
 					 gnvh->opt_len * 4);
 		if (!tun_dst)
@@ -179,12 +235,25 @@
 
 	skb_reset_network_header(skb);
 
-	err = IP_ECN_decapsulate(iph, skb);
+	if (iph)
+		err = IP_ECN_decapsulate(iph, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ip6h)
+		err = IP6_ECN_decapsulate(ip6h, skb);
+#endif
 
 	if (unlikely(err)) {
-		if (log_ecn_error)
-			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
-					     &iph->saddr, iph->tos);
+		if (log_ecn_error) {
+			if (iph)
+				net_info_ratelimited("non-ECT from %pI4 "
+						     "with TOS=%#x\n",
+						     &iph->saddr, iph->tos);
+#if IS_ENABLED(CONFIG_IPV6)
+			if (ip6h)
+				net_info_ratelimited("non-ECT from %pI6\n",
+						     &ip6h->saddr);
+#endif
+		}
 		if (err > 1) {
 			++geneve->dev->stats.rx_frame_errors;
 			++geneve->dev->stats.rx_errors;
@@ -284,6 +353,7 @@
 
 	if (ipv6) {
 		udp_conf.family = AF_INET6;
+		udp_conf.ipv6_v6only = 1;
 	} else {
 		udp_conf.family = AF_INET;
 		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
@@ -458,9 +528,9 @@
 		udp_del_offload(&gs->udp_offloads);
 }
 
-static void geneve_sock_release(struct geneve_sock *gs)
+static void __geneve_sock_release(struct geneve_sock *gs)
 {
-	if (--gs->refcnt)
+	if (!gs || --gs->refcnt)
 		return;
 
 	list_del(&gs->list);
@@ -469,66 +539,117 @@
 	kfree_rcu(gs, rcu);
 }
 
+static void geneve_sock_release(struct geneve_dev *geneve)
+{
+	__geneve_sock_release(geneve->sock4);
+#if IS_ENABLED(CONFIG_IPV6)
+	__geneve_sock_release(geneve->sock6);
+#endif
+}
+
 static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
+					    sa_family_t family,
 					    __be16 dst_port)
 {
 	struct geneve_sock *gs;
 
 	list_for_each_entry(gs, &gn->sock_list, list) {
 		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
-		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
+		    inet_sk(gs->sock->sk)->sk.sk_family == family) {
 			return gs;
 		}
 	}
 	return NULL;
 }
 
-static int geneve_open(struct net_device *dev)
+static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
 {
-	struct geneve_dev *geneve = netdev_priv(dev);
 	struct net *net = geneve->net;
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_sock *gs;
 	__u32 hash;
 
-	gs = geneve_find_sock(gn, geneve->dst_port);
+	gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->dst_port);
 	if (gs) {
 		gs->refcnt++;
 		goto out;
 	}
 
-	gs = geneve_socket_create(net, geneve->dst_port, false);
+	gs = geneve_socket_create(net, geneve->dst_port, ipv6);
 	if (IS_ERR(gs))
 		return PTR_ERR(gs);
 
 out:
 	gs->collect_md = geneve->collect_md;
-	geneve->sock = gs;
+#if IS_ENABLED(CONFIG_IPV6)
+	if (ipv6)
+		geneve->sock6 = gs;
+	else
+#endif
+		geneve->sock4 = gs;
 
 	hash = geneve_net_vni_hash(geneve->vni);
 	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
 	return 0;
 }
 
+static int geneve_open(struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	bool ipv6 = geneve->remote.sa.sa_family == AF_INET6;
+	bool metadata = geneve->collect_md;
+	int ret = 0;
+
+	geneve->sock4 = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+	geneve->sock6 = NULL;
+	if (ipv6 || metadata)
+		ret = geneve_sock_add(geneve, true);
+#endif
+	if (!ret && (!ipv6 || metadata))
+		ret = geneve_sock_add(geneve, false);
+	if (ret < 0)
+		geneve_sock_release(geneve);
+
+	return ret;
+}
+
 static int geneve_stop(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
-	struct geneve_sock *gs = geneve->sock;
 
 	if (!hlist_unhashed(&geneve->hlist))
 		hlist_del_rcu(&geneve->hlist);
-	geneve_sock_release(gs);
+	geneve_sock_release(geneve);
 	return 0;
 }
 
+static void geneve_build_header(struct genevehdr *geneveh,
+				__be16 tun_flags, u8 vni[3],
+				u8 options_len, u8 *options)
+{
+	geneveh->ver = GENEVE_VER;
+	geneveh->opt_len = options_len / 4;
+	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
+	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+	geneveh->rsvd1 = 0;
+	memcpy(geneveh->vni, vni, 3);
+	geneveh->proto_type = htons(ETH_P_TEB);
+	geneveh->rsvd2 = 0;
+
+	memcpy(geneveh->options, options, options_len);
+}
+
 static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
 			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-			    bool csum)
+			    bool csum, bool xnet)
 {
 	struct genevehdr *gnvh;
 	int min_headroom;
 	int err;
 
+	skb_scrub_packet(skb, xnet);
+
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
 	err = skb_cow_head(skb, min_headroom);
@@ -544,15 +665,7 @@
 	}
 
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
-	gnvh->ver = GENEVE_VER;
-	gnvh->opt_len = opt_len / 4;
-	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
-	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
-	gnvh->rsvd1 = 0;
-	memcpy(gnvh->vni, vni, 3);
-	gnvh->proto_type = htons(ETH_P_TEB);
-	gnvh->rsvd2 = 0;
-	memcpy(gnvh->options, opt, opt_len);
+	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
 
 	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
 	return 0;
@@ -562,10 +675,47 @@
 	return err;
 }
 
-static struct rtable *geneve_get_rt(struct sk_buff *skb,
-				    struct net_device *dev,
-				    struct flowi4 *fl4,
-				    struct ip_tunnel_info *info)
+#if IS_ENABLED(CONFIG_IPV6)
+static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
+			     __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+			     bool csum, bool xnet)
+{
+	struct genevehdr *gnvh;
+	int min_headroom;
+	int err;
+
+	skb_scrub_packet(skb, xnet);
+
+	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
+	err = skb_cow_head(skb, min_headroom);
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		goto free_dst;
+	}
+
+	skb = udp_tunnel_handle_offloads(skb, csum);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		goto free_dst;
+	}
+
+	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+	return 0;
+
+free_dst:
+	dst_release(dst);
+	return err;
+}
+#endif
+
+static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
+				       struct net_device *dev,
+				       struct flowi4 *fl4,
+				       struct ip_tunnel_info *info)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct rtable *rt = NULL;
@@ -588,24 +738,67 @@
 		}
 
 		fl4->flowi4_tos = RT_TOS(tos);
-		fl4->daddr = geneve->remote.sin_addr.s_addr;
+		fl4->daddr = geneve->remote.sin.sin_addr.s_addr;
 	}
 
 	rt = ip_route_output_key(geneve->net, fl4);
 	if (IS_ERR(rt)) {
 		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
-		dev->stats.tx_carrier_errors++;
-		return rt;
+		return ERR_PTR(-ENETUNREACH);
 	}
 	if (rt->dst.dev == dev) { /* is this necessary? */
 		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
-		dev->stats.collisions++;
 		ip_rt_put(rt);
-		return ERR_PTR(-EINVAL);
+		return ERR_PTR(-ELOOP);
 	}
 	return rt;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
+					   struct net_device *dev,
+					   struct flowi6 *fl6,
+					   struct ip_tunnel_info *info)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct geneve_sock *gs6 = geneve->sock6;
+	struct dst_entry *dst = NULL;
+	__u8 prio;
+
+	memset(fl6, 0, sizeof(*fl6));
+	fl6->flowi6_mark = skb->mark;
+	fl6->flowi6_proto = IPPROTO_UDP;
+
+	if (info) {
+		fl6->daddr = info->key.u.ipv6.dst;
+		fl6->saddr = info->key.u.ipv6.src;
+		fl6->flowi6_tos = RT_TOS(info->key.tos);
+	} else {
+		prio = geneve->tos;
+		if (prio == 1) {
+			const struct iphdr *iip = ip_hdr(skb);
+
+			prio = ip_tunnel_get_dsfield(iip, skb);
+		}
+
+		fl6->flowi6_tos = RT_TOS(prio);
+		fl6->daddr = geneve->remote.sin6.sin6_addr;
+	}
+
+	if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
+		netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
+		return ERR_PTR(-ENETUNREACH);
+	}
+	if (dst->dev == dev) { /* is this necessary? */
+		netdev_dbg(dev, "circular route to %pI6\n", &fl6->daddr);
+		dst_release(dst);
+		return ERR_PTR(-ELOOP);
+	}
+
+	return dst;
+}
+#endif
+
 /* Convert 64 bit tunnel ID to 24 bit VNI. */
 static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 {
@@ -620,23 +813,23 @@
 #endif
 }
 
-static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+				   struct ip_tunnel_info *info)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
-	struct geneve_sock *gs = geneve->sock;
-	struct ip_tunnel_info *info = NULL;
+	struct geneve_sock *gs4 = geneve->sock4;
 	struct rtable *rt = NULL;
 	const struct iphdr *iip; /* interior IP header */
+	int err = -EINVAL;
 	struct flowi4 fl4;
 	__u8 tos, ttl;
 	__be16 sport;
 	bool udp_csum;
 	__be16 df;
-	int err;
+	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
 
 	if (geneve->collect_md) {
-		info = skb_tunnel_info(skb);
-		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
+		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
 			netdev_dbg(dev, "no tunnel metadata\n");
 			goto tx_error;
 		}
@@ -644,10 +837,9 @@
 			goto tx_error;
 	}
 
-	rt = geneve_get_rt(skb, dev, &fl4, info);
+	rt = geneve_get_v4_rt(skb, dev, &fl4, info);
 	if (IS_ERR(rt)) {
-		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
-		dev->stats.tx_carrier_errors++;
+		err = PTR_ERR(rt);
 		goto tx_error;
 	}
 
@@ -667,7 +859,7 @@
 
 		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
 		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
-				       info->options_len, opts, udp_csum);
+				       info->options_len, opts, udp_csum, xnet);
 		if (unlikely(err))
 			goto err;
 
@@ -677,7 +869,7 @@
 	} else {
 		udp_csum = false;
 		err = geneve_build_skb(rt, skb, 0, geneve->vni,
-				       0, NULL, udp_csum);
+				       0, NULL, udp_csum, xnet);
 		if (unlikely(err))
 			goto err;
 
@@ -688,7 +880,7 @@
 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
 		df = 0;
 	}
-	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
+	err = udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
 				  tos, ttl, df, sport, geneve->dst_port,
 				  !net_eq(geneve->net, dev_net(geneve->dev)),
 				  !udp_csum);
@@ -699,10 +891,152 @@
 tx_error:
 	dev_kfree_skb(skb);
 err:
-	dev->stats.tx_errors++;
+	if (err == -ELOOP)
+		dev->stats.collisions++;
+	else if (err == -ENETUNREACH)
+		dev->stats.tx_carrier_errors++;
+	else
+		dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
+				    struct ip_tunnel_info *info)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct geneve_sock *gs6 = geneve->sock6;
+	struct dst_entry *dst = NULL;
+	const struct iphdr *iip; /* interior IP header */
+	int err = -EINVAL;
+	struct flowi6 fl6;
+	__u8 prio, ttl;
+	__be16 sport;
+	bool udp_csum;
+	bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+
+	if (geneve->collect_md) {
+		if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
+			netdev_dbg(dev, "no tunnel metadata\n");
+			goto tx_error;
+		}
+	}
+
+	dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+	if (IS_ERR(dst)) {
+		err = PTR_ERR(dst);
+		goto tx_error;
+	}
+
+	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+	skb_reset_mac_header(skb);
+
+	iip = ip_hdr(skb);
+
+	if (info) {
+		const struct ip_tunnel_key *key = &info->key;
+		u8 *opts = NULL;
+		u8 vni[3];
+
+		tunnel_id_to_vni(key->tun_id, vni);
+		if (key->tun_flags & TUNNEL_GENEVE_OPT)
+			opts = ip_tunnel_info_opts(info);
+
+		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+		err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
+					info->options_len, opts,
+					udp_csum, xnet);
+		if (unlikely(err))
+			goto err;
+
+		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
+		ttl = key->ttl;
+	} else {
+		udp_csum = false;
+		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
+					0, NULL, udp_csum, xnet);
+		if (unlikely(err))
+			goto err;
+
+		prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
+		ttl = geneve->ttl;
+		if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
+			ttl = 1;
+		ttl = ttl ? : ip6_dst_hoplimit(dst);
+	}
+	err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
+				   &fl6.saddr, &fl6.daddr, prio, ttl,
+				   sport, geneve->dst_port, !udp_csum);
+
+	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+	return NETDEV_TX_OK;
+
+tx_error:
+	dev_kfree_skb(skb);
+err:
+	if (err == -ELOOP)
+		dev->stats.collisions++;
+	else if (err == -ENETUNREACH)
+		dev->stats.tx_carrier_errors++;
+	else
+		dev->stats.tx_errors++;
+	return NETDEV_TX_OK;
+}
+#endif
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct ip_tunnel_info *info = NULL;
+
+	if (geneve->collect_md)
+		info = skb_tunnel_info(skb);
+
+#if IS_ENABLED(CONFIG_IPV6)
+	if ((info && ip_tunnel_info_af(info) == AF_INET6) ||
+	    (!info && geneve->remote.sa.sa_family == AF_INET6))
+		return geneve6_xmit_skb(skb, dev, info);
+#endif
+	return geneve_xmit_skb(skb, dev, info);
+}
+
+static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct rtable *rt;
+	struct flowi4 fl4;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+#endif
+
+	if (ip_tunnel_info_af(info) == AF_INET) {
+		rt = geneve_get_v4_rt(skb, dev, &fl4, info);
+		if (IS_ERR(rt))
+			return PTR_ERR(rt);
+
+		ip_rt_put(rt);
+		info->key.u.ipv4.src = fl4.saddr;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (ip_tunnel_info_af(info) == AF_INET6) {
+		dst = geneve_get_v6_dst(skb, dev, &fl6, info);
+		if (IS_ERR(dst))
+			return PTR_ERR(dst);
+
+		dst_release(dst);
+		info->key.u.ipv6.src = fl6.saddr;
+#endif
+	} else {
+		return -EINVAL;
+	}
+
+	info->key.tp_src = udp_flow_src_port(geneve->net, skb,
+					     1, USHRT_MAX, true);
+	info->key.tp_dst = geneve->dst_port;
+	return 0;
+}
+
 static const struct net_device_ops geneve_netdev_ops = {
 	.ndo_init		= geneve_init,
 	.ndo_uninit		= geneve_uninit,
@@ -713,6 +1047,7 @@
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_fill_metadata_dst	= geneve_fill_metadata_dst,
 };
 
 static void geneve_get_drvinfo(struct net_device *dev,
@@ -759,6 +1094,7 @@
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
 	[IFLA_GENEVE_ID]		= { .type = NLA_U32 },
 	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+	[IFLA_GENEVE_REMOTE6]		= { .len = sizeof(struct in6_addr) },
 	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
 	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
 	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
@@ -790,7 +1126,7 @@
 
 static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
 					  __be16 dst_port,
-					  __be32 rem_addr,
+					  union geneve_addr *remote,
 					  u8 vni[],
 					  bool *tun_on_same_port,
 					  bool *tun_collect_md)
@@ -806,7 +1142,7 @@
 			*tun_on_same_port = true;
 		}
 		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
-		    rem_addr == geneve->remote.sin_addr.s_addr &&
+		    !memcmp(remote, &geneve->remote, sizeof(geneve->remote)) &&
 		    dst_port == geneve->dst_port)
 			t = geneve;
 	}
@@ -814,18 +1150,20 @@
 }
 
 static int geneve_configure(struct net *net, struct net_device *dev,
-			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
-			    __be16 dst_port, bool metadata)
+			    union geneve_addr *remote,
+			    __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port,
+			    bool metadata)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_dev *t, *geneve = netdev_priv(dev);
 	bool tun_collect_md, tun_on_same_port;
 	int err;
 
-	if (metadata) {
-		if (rem_addr || vni || tos || ttl)
-			return -EINVAL;
-	}
+	if (!remote)
+		return -EINVAL;
+	if (metadata &&
+	    (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl))
+		return -EINVAL;
 
 	geneve->net = net;
 	geneve->dev = dev;
@@ -834,16 +1172,19 @@
 	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
 	geneve->vni[2] =  vni & 0x000000ff;
 
-	geneve->remote.sin_addr.s_addr = rem_addr;
-	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+	if ((remote->sa.sa_family == AF_INET &&
+	     IN_MULTICAST(ntohl(remote->sin.sin_addr.s_addr))) ||
+	    (remote->sa.sa_family == AF_INET6 &&
+	     ipv6_addr_is_multicast(&remote->sin6.sin6_addr)))
 		return -EINVAL;
+	geneve->remote = *remote;
 
 	geneve->ttl = ttl;
 	geneve->tos = tos;
 	geneve->dst_port = dst_port;
 	geneve->collect_md = metadata;
 
-	t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
+	t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
 			    &tun_on_same_port, &tun_collect_md);
 	if (t)
 		return -EBUSY;
@@ -870,14 +1211,35 @@
 	__be16 dst_port = htons(GENEVE_UDP_PORT);
 	__u8 ttl = 0, tos = 0;
 	bool metadata = false;
-	__be32 rem_addr;
-	__u32 vni;
+	union geneve_addr remote = geneve_remote_unspec;
+	__u32 vni = 0;
 
-	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+	if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
 		return -EINVAL;
 
-	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
-	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+	if (data[IFLA_GENEVE_REMOTE]) {
+		remote.sa.sa_family = AF_INET;
+		remote.sin.sin_addr.s_addr =
+			nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+	}
+
+	if (data[IFLA_GENEVE_REMOTE6]) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
+
+		remote.sa.sa_family = AF_INET6;
+		remote.sin6.sin6_addr =
+			nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]);
+
+		if (ipv6_addr_type(&remote.sin6.sin6_addr) &
+		    IPV6_ADDR_LINKLOCAL) {
+			netdev_dbg(dev, "link-local remote is unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (data[IFLA_GENEVE_ID])
+		vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 
 	if (data[IFLA_GENEVE_TTL])
 		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
@@ -891,8 +1253,8 @@
 	if (data[IFLA_GENEVE_COLLECT_METADATA])
 		metadata = true;
 
-	return geneve_configure(net, dev, rem_addr, vni,
-				ttl, tos, dst_port, metadata);
+	return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port,
+				metadata);
 }
 
 static void geneve_dellink(struct net_device *dev, struct list_head *head)
@@ -906,7 +1268,7 @@
 static size_t geneve_get_size(const struct net_device *dev)
 {
 	return nla_total_size(sizeof(__u32)) +	/* IFLA_GENEVE_ID */
-		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+		nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
 		nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
@@ -923,9 +1285,17 @@
 	if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
 		goto nla_put_failure;
 
-	if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
-			    geneve->remote.sin_addr.s_addr))
-		goto nla_put_failure;
+	if (geneve->remote.sa.sa_family == AF_INET) {
+		if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+				    geneve->remote.sin.sin_addr.s_addr))
+			goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
+				     &geneve->remote.sin6.sin6_addr))
+			goto nla_put_failure;
+#endif
+	}
 
 	if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
 	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
@@ -971,7 +1341,8 @@
 	if (IS_ERR(dev))
 		return dev;
 
-	err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
+	err = geneve_configure(net, dev, &geneve_remote_unspec,
+			       0, 0, 0, htons(dst_port), true);
 	if (err) {
 		free_netdev(dev);
 		return ERR_PTR(err);
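With the IPv6 additions, a geneve device can hold one socket per address
family: a configured tunnel only needs the family of its remote, while
collect-metadata mode must be ready for either. A condensed sketch of
geneve_open()'s selection logic above, with plain booleans standing in for
the real structures:

    #include <stdbool.h>

    /* Which sockets geneve_open() must create: both in metadata mode,
     * otherwise only the family matching the configured remote.
     */
    static bool need_sock6(bool remote_is_v6, bool collect_md)
    {
    	return remote_is_v6 || collect_md;
    }

    static bool need_sock4(bool remote_is_v6, bool collect_md)
    {
    	return !remote_is_v6 || collect_md;
    }
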
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 5a614b2..ce5f1a2 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -43,6 +43,7 @@
 	tristate "Microchip MRF24J40 transceiver driver"
 	depends on IEEE802154_DRIVERS && MAC802154
 	depends on SPI
+	select REGMAP_SPI
 	---help---
 	  Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
 	  controller.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 9756e64..de6e4fa 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -81,7 +81,7 @@
 	u8 from_state;
 	u8 to_state;
 
-	bool irq_enable;
+	bool free;
 };
 
 struct at86rf230_trac {
@@ -105,8 +105,6 @@
 	struct completion state_complete;
 	struct at86rf230_state_change state;
 
-	struct at86rf230_state_change irq;
-
 	unsigned long cal_timeout;
 	bool is_tx;
 	bool is_tx_from_off;
@@ -122,8 +120,7 @@
 static void
 at86rf230_async_state_change(struct at86rf230_local *lp,
 			     struct at86rf230_state_change *ctx,
-			     const u8 state, void (*complete)(void *context),
-			     const bool irq_enable);
+			     const u8 state, void (*complete)(void *context));
 
 static inline void
 at86rf230_sleep(struct at86rf230_local *lp)
@@ -352,8 +349,10 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	lp->is_tx = 0;
-	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false);
+	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
 	ieee802154_wake_queue(lp->hw);
+	if (ctx->free)
+		kfree(ctx);
 }
 
 static inline void
@@ -363,15 +362,14 @@
 	dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
 
 	at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
-				     at86rf230_async_error_recover, false);
+				     at86rf230_async_error_recover);
 }
 
 /* Generic function to get some register value in async mode */
 static void
-at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
+at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg,
 			 struct at86rf230_state_change *ctx,
-			 void (*complete)(void *context),
-			 const bool irq_enable)
+			 void (*complete)(void *context))
 {
 	int rc;
 
@@ -379,14 +377,24 @@
 
 	tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
 	ctx->msg.complete = complete;
-	ctx->irq_enable = irq_enable;
 	rc = spi_async(lp->spi, &ctx->msg);
-	if (rc) {
-		if (irq_enable)
-			enable_irq(ctx->irq);
-
+	if (rc)
 		at86rf230_async_error(lp, ctx, rc);
-	}
+}
+
+static void
+at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val,
+			  struct at86rf230_state_change *ctx,
+			  void (*complete)(void *context))
+{
+	int rc;
+
+	ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+	ctx->buf[1] = val;
+	ctx->msg.complete = complete;
+	rc = spi_async(lp->spi, &ctx->msg);
+	if (rc)
+		at86rf230_async_error(lp, ctx, rc);
 }
 
 static void
@@ -434,8 +442,7 @@
 				lp->tx_retry++;
 
 				at86rf230_async_state_change(lp, ctx, state,
-							     ctx->complete,
-							     ctx->irq_enable);
+							     ctx->complete);
 				return;
 			}
 		}
@@ -456,8 +463,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-				 at86rf230_async_state_assert,
-				 ctx->irq_enable);
+				 at86rf230_async_state_assert);
 
 	return HRTIMER_NORESTART;
 }
@@ -562,14 +568,12 @@
 	struct at86rf230_local *lp = ctx->lp;
 	u8 *buf = ctx->buf;
 	const u8 trx_state = buf[1] & TRX_STATE_MASK;
-	int rc;
 
 	/* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
 	if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
 		udelay(1);
 		at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-					 at86rf230_async_state_change_start,
-					 ctx->irq_enable);
+					 at86rf230_async_state_change_start);
 		return;
 	}
 
@@ -586,31 +590,20 @@
 	/* Going into the next step for a state change which does a
 	 * timing-relevant delay.
 	 */
-	buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-	buf[1] = ctx->to_state;
-	ctx->msg.complete = at86rf230_async_state_delay;
-	rc = spi_async(lp->spi, &ctx->msg);
-	if (rc) {
-		if (ctx->irq_enable)
-			enable_irq(ctx->irq);
-
-		at86rf230_async_error(lp, ctx, rc);
-	}
+	at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx,
+				  at86rf230_async_state_delay);
 }
 
 static void
 at86rf230_async_state_change(struct at86rf230_local *lp,
 			     struct at86rf230_state_change *ctx,
-			     const u8 state, void (*complete)(void *context),
-			     const bool irq_enable)
+			     const u8 state, void (*complete)(void *context))
 {
 	/* Initialization for the state change context */
 	ctx->to_state = state;
 	ctx->complete = complete;
-	ctx->irq_enable = irq_enable;
 	at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
-				 at86rf230_async_state_change_start,
-				 irq_enable);
+				 at86rf230_async_state_change_start);
 }
 
 static void
@@ -632,8 +625,7 @@
 	unsigned long rc;
 
 	at86rf230_async_state_change(lp, &lp->state, state,
-				     at86rf230_sync_state_change_complete,
-				     false);
+				     at86rf230_sync_state_change_complete);
 
 	rc = wait_for_completion_timeout(&lp->state_complete,
 					 msecs_to_jiffies(100));
@@ -651,9 +643,8 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	enable_irq(ctx->irq);
-
 	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+	kfree(ctx);
 }
 
 static void
@@ -663,7 +654,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
-				     at86rf230_tx_complete, true);
+				     at86rf230_tx_complete);
 }
 
 static void
@@ -697,8 +688,7 @@
 		}
 	}
 
-	at86rf230_async_state_change(lp, &lp->irq, STATE_TX_ON,
-				     at86rf230_tx_on, true);
+	at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on);
 }
 
 static void
@@ -706,7 +696,6 @@
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	u8 rx_local_buf[AT86RF2XX_MAX_BUF];
 	const u8 *buf = ctx->buf;
 	struct sk_buff *skb;
 	u8 len, lqi;
@@ -718,18 +707,16 @@
 	}
 	lqi = buf[2 + len];
 
-	memcpy(rx_local_buf, buf + 2, len);
-	ctx->trx.len = 2;
-	enable_irq(ctx->irq);
-
 	skb = dev_alloc_skb(IEEE802154_MTU);
 	if (!skb) {
 		dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
+		kfree(ctx);
 		return;
 	}
 
-	memcpy(skb_put(skb, len), rx_local_buf, len);
+	memcpy(skb_put(skb, len), buf + 2, len);
 	ieee802154_rx_irqsafe(lp->hw, skb, lqi);
+	kfree(ctx);
 }
 
 static void
@@ -765,21 +752,23 @@
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
 		ctx->trx.len = 2;
-		enable_irq(ctx->irq);
 		at86rf230_async_error(lp, ctx, rc);
 	}
 }
 
 static void
-at86rf230_irq_trx_end(struct at86rf230_local *lp)
+at86rf230_irq_trx_end(void *context)
 {
+	struct at86rf230_state_change *ctx = context;
+	struct at86rf230_local *lp = ctx->lp;
+
 	if (lp->is_tx) {
 		lp->is_tx = 0;
-		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
-					 at86rf230_tx_trac_check, true);
+		at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+					 at86rf230_tx_trac_check);
 	} else {
-		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
-					 at86rf230_rx_trac_check, true);
+		at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+					 at86rf230_rx_trac_check);
 	}
 }
 
@@ -789,32 +778,59 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 	const u8 *buf = ctx->buf;
-	const u8 irq = buf[1];
+	u8 irq = buf[1];
+
+	enable_irq(lp->spi->irq);
 
 	if (irq & IRQ_TRX_END) {
-		at86rf230_irq_trx_end(lp);
+		at86rf230_irq_trx_end(ctx);
 	} else {
-		enable_irq(ctx->irq);
 		dev_err(&lp->spi->dev, "not supported irq %02x received\n",
 			irq);
+		kfree(ctx);
 	}
 }
 
+static void
+at86rf230_setup_spi_messages(struct at86rf230_local *lp,
+			     struct at86rf230_state_change *state)
+{
+	state->lp = lp;
+	state->irq = lp->spi->irq;
+	spi_message_init(&state->msg);
+	state->msg.context = state;
+	state->trx.len = 2;
+	state->trx.tx_buf = state->buf;
+	state->trx.rx_buf = state->buf;
+	spi_message_add_tail(&state->trx, &state->msg);
+	hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	state->timer.function = at86rf230_async_state_timer;
+}
+
 static irqreturn_t at86rf230_isr(int irq, void *data)
 {
 	struct at86rf230_local *lp = data;
-	struct at86rf230_state_change *ctx = &lp->irq;
-	u8 *buf = ctx->buf;
+	struct at86rf230_state_change *ctx;
 	int rc;
 
 	disable_irq_nosync(irq);
 
-	buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
+	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+	if (!ctx) {
+		enable_irq(irq);
+		return IRQ_NONE;
+	}
+
+	at86rf230_setup_spi_messages(lp, ctx);
+	/* tell the error-handling path to free ctx */
+	ctx->free = true;
+
+	ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
 	ctx->msg.complete = at86rf230_irq_status;
 	rc = spi_async(lp->spi, &ctx->msg);
 	if (rc) {
-		enable_irq(irq);
 		at86rf230_async_error(lp, ctx, rc);
+		enable_irq(irq);
 		return IRQ_NONE;
 	}
 
@@ -826,21 +842,14 @@
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	u8 *buf = ctx->buf;
-	int rc;
 
 	ctx->trx.len = 2;
 
-	if (gpio_is_valid(lp->slp_tr)) {
+	if (gpio_is_valid(lp->slp_tr))
 		at86rf230_slp_tr_rising_edge(lp);
-	} else {
-		buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-		buf[1] = STATE_BUSY_TX;
-		ctx->msg.complete = NULL;
-		rc = spi_async(lp->spi, &ctx->msg);
-		if (rc)
-			at86rf230_async_error(lp, ctx, rc);
-	}
+	else
+		at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
+					  NULL);
 }
 
 static void
@@ -873,7 +882,7 @@
 	struct at86rf230_local *lp = ctx->lp;
 
 	at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-				     at86rf230_write_frame, false);
+				     at86rf230_write_frame);
 }
 
 static void
@@ -886,12 +895,10 @@
 	if (lp->is_tx_from_off) {
 		lp->is_tx_from_off = false;
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-					     at86rf230_write_frame,
-					     false);
+					     at86rf230_write_frame);
 	} else {
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-					     at86rf230_xmit_tx_on,
-					     false);
+					     at86rf230_xmit_tx_on);
 	}
 }
 
@@ -914,7 +921,7 @@
 	if (time_is_before_jiffies(lp->cal_timeout)) {
 		lp->is_tx_from_off = true;
 		at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
-					     at86rf230_xmit_start, false);
+					     at86rf230_xmit_start);
 	} else {
 		at86rf230_xmit_start(ctx);
 	}
@@ -1373,10 +1380,6 @@
 		return rc;
 
 	irq_type = irq_get_trigger_type(lp->spi->irq);
-	if (irq_type == IRQ_TYPE_EDGE_RISING ||
-	    irq_type == IRQ_TYPE_EDGE_FALLING)
-		dev_warn(&lp->spi->dev,
-			 "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
 	if (irq_type == IRQ_TYPE_EDGE_FALLING ||
 	    irq_type == IRQ_TYPE_LEVEL_LOW)
 		irq_pol = IRQ_ACTIVE_LOW;
@@ -1602,43 +1605,6 @@
 	return rc;
 }
 
-static void
-at86rf230_setup_spi_messages(struct at86rf230_local *lp)
-{
-	lp->state.lp = lp;
-	lp->state.irq = lp->spi->irq;
-	spi_message_init(&lp->state.msg);
-	lp->state.msg.context = &lp->state;
-	lp->state.trx.len = 2;
-	lp->state.trx.tx_buf = lp->state.buf;
-	lp->state.trx.rx_buf = lp->state.buf;
-	spi_message_add_tail(&lp->state.trx, &lp->state.msg);
-	hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->state.timer.function = at86rf230_async_state_timer;
-
-	lp->irq.lp = lp;
-	lp->irq.irq = lp->spi->irq;
-	spi_message_init(&lp->irq.msg);
-	lp->irq.msg.context = &lp->irq;
-	lp->irq.trx.len = 2;
-	lp->irq.trx.tx_buf = lp->irq.buf;
-	lp->irq.trx.rx_buf = lp->irq.buf;
-	spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
-	hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->irq.timer.function = at86rf230_async_state_timer;
-
-	lp->tx.lp = lp;
-	lp->tx.irq = lp->spi->irq;
-	spi_message_init(&lp->tx.msg);
-	lp->tx.msg.context = &lp->tx;
-	lp->tx.trx.len = 2;
-	lp->tx.trx.tx_buf = lp->tx.buf;
-	lp->tx.trx.rx_buf = lp->tx.buf;
-	spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
-	hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	lp->tx.timer.function = at86rf230_async_state_timer;
-}
-
 #ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
 static struct dentry *at86rf230_debugfs_root;
 
@@ -1760,7 +1726,8 @@
 		goto free_dev;
 	}
 
-	at86rf230_setup_spi_messages(lp);
+	at86rf230_setup_spi_messages(lp, &lp->state);
+	at86rf230_setup_spi_messages(lp, &lp->tx);
 
 	rc = at86rf230_detect_device(lp);
 	if (rc < 0)
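Instead of one shared lp->irq context, the ISR now allocates a private
state-change context per interrupt with kzalloc(GFP_ATOMIC) and marks it
self-freeing via ctx->free. A userspace sketch of that ownership pattern,
with calloc()/free() standing in for kzalloc()/kfree():

    #include <stdbool.h>
    #include <stdlib.h>

    struct xfer_ctx {
    	bool free_on_complete;	/* completion path should free this ctx */
    	unsigned char buf[4];
    };

    /* ISR path: allocate a private context per interrupt so concurrent
     * interrupts never share state.
     */
    static struct xfer_ctx *isr_alloc_ctx(void)
    {
    	struct xfer_ctx *ctx = calloc(1, sizeof(*ctx));

    	if (ctx)
    		ctx->free_on_complete = true;
    	return ctx;
    }

    /* Completion path: mirror of the "if (ctx->free) kfree(ctx);" sites. */
    static void xfer_complete(struct xfer_ctx *ctx)
    {
    	if (ctx && ctx->free_on_complete)
    		free(ctx);
    }
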
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 997724b..aca0fb3 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -18,51 +18,172 @@
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/regmap.h>
 #include <linux/ieee802154.h>
+#include <linux/irq.h>
 #include <net/cfg802154.h>
 #include <net/mac802154.h>
 
 /* MRF24J40 Short Address Registers */
-#define REG_RXMCR    0x00  /* Receive MAC control */
-#define REG_PANIDL   0x01  /* PAN ID (low) */
-#define REG_PANIDH   0x02  /* PAN ID (high) */
-#define REG_SADRL    0x03  /* Short address (low) */
-#define REG_SADRH    0x04  /* Short address (high) */
-#define REG_EADR0    0x05  /* Long address (low) (high is EADR7) */
-#define REG_TXMCR    0x11  /* Transmit MAC control */
-#define REG_PACON0   0x16  /* Power Amplifier Control */
-#define REG_PACON1   0x17  /* Power Amplifier Control */
-#define REG_PACON2   0x18  /* Power Amplifier Control */
-#define REG_TXNCON   0x1B  /* Transmit Normal FIFO Control */
-#define REG_TXSTAT   0x24  /* TX MAC Status Register */
-#define REG_SOFTRST  0x2A  /* Soft Reset */
-#define REG_TXSTBL   0x2E  /* TX Stabilization */
-#define REG_INTSTAT  0x31  /* Interrupt Status */
-#define REG_INTCON   0x32  /* Interrupt Control */
-#define REG_GPIO     0x33  /* GPIO */
-#define REG_TRISGPIO 0x34  /* GPIO direction */
-#define REG_RFCTL    0x36  /* RF Control Mode Register */
-#define REG_BBREG1   0x39  /* Baseband Registers */
-#define REG_BBREG2   0x3A  /* */
-#define REG_BBREG6   0x3E  /* */
-#define REG_CCAEDTH  0x3F  /* Energy Detection Threshold */
+#define REG_RXMCR	0x00  /* Receive MAC control */
+#define BIT_PROMI	BIT(0)
+#define BIT_ERRPKT	BIT(1)
+#define BIT_NOACKRSP	BIT(5)
+#define BIT_PANCOORD	BIT(3)
+
+#define REG_PANIDL	0x01  /* PAN ID (low) */
+#define REG_PANIDH	0x02  /* PAN ID (high) */
+#define REG_SADRL	0x03  /* Short address (low) */
+#define REG_SADRH	0x04  /* Short address (high) */
+#define REG_EADR0	0x05  /* Long address (low) (high is EADR7) */
+#define REG_EADR1	0x06
+#define REG_EADR2	0x07
+#define REG_EADR3	0x08
+#define REG_EADR4	0x09
+#define REG_EADR5	0x0A
+#define REG_EADR6	0x0B
+#define REG_EADR7	0x0C
+#define REG_RXFLUSH	0x0D
+#define REG_ORDER	0x10
+#define REG_TXMCR	0x11  /* Transmit MAC control */
+#define TXMCR_MIN_BE_SHIFT		3
+#define TXMCR_MIN_BE_MASK		0x18
+#define TXMCR_CSMA_RETRIES_SHIFT	0
+#define TXMCR_CSMA_RETRIES_MASK		0x07
+
+#define REG_ACKTMOUT	0x12
+#define REG_ESLOTG1	0x13
+#define REG_SYMTICKL	0x14
+#define REG_SYMTICKH	0x15
+#define REG_PACON0	0x16  /* Power Amplifier Control */
+#define REG_PACON1	0x17  /* Power Amplifier Control */
+#define REG_PACON2	0x18  /* Power Amplifier Control */
+#define REG_TXBCON0	0x1A
+#define REG_TXNCON	0x1B  /* Transmit Normal FIFO Control */
+#define BIT_TXNTRIG	BIT(0)
+#define BIT_TXNACKREQ	BIT(2)
+
+#define REG_TXG1CON	0x1C
+#define REG_TXG2CON	0x1D
+#define REG_ESLOTG23	0x1E
+#define REG_ESLOTG45	0x1F
+#define REG_ESLOTG67	0x20
+#define REG_TXPEND	0x21
+#define REG_WAKECON	0x22
+#define REG_FROMOFFSET	0x23
+#define REG_TXSTAT	0x24  /* TX MAC Status Register */
+#define REG_TXBCON1	0x25
+#define REG_GATECLK	0x26
+#define REG_TXTIME	0x27
+#define REG_HSYMTMRL	0x28
+#define REG_HSYMTMRH	0x29
+#define REG_SOFTRST	0x2A  /* Soft Reset */
+#define REG_SECCON0	0x2C
+#define REG_SECCON1	0x2D
+#define REG_TXSTBL	0x2E  /* TX Stabilization */
+#define REG_RXSR	0x30
+#define REG_INTSTAT	0x31  /* Interrupt Status */
+#define BIT_TXNIF	BIT(0)
+#define BIT_RXIF	BIT(3)
+
+#define REG_INTCON	0x32  /* Interrupt Control */
+#define BIT_TXNIE	BIT(0)
+#define BIT_RXIE	BIT(3)
+
+#define REG_GPIO	0x33  /* GPIO */
+#define REG_TRISGPIO	0x34  /* GPIO direction */
+#define REG_SLPACK	0x35
+#define REG_RFCTL	0x36  /* RF Control Mode Register */
+#define BIT_RFRST	BIT(2)
+
+#define REG_SECCR2	0x37
+#define REG_BBREG0	0x38
+#define REG_BBREG1	0x39  /* Baseband Registers */
+#define BIT_RXDECINV	BIT(2)
+
+#define REG_BBREG2	0x3A  /* */
+#define BBREG2_CCA_MODE_SHIFT	6
+#define BBREG2_CCA_MODE_MASK	0xc0
+
+#define REG_BBREG3	0x3B
+#define REG_BBREG4	0x3C
+#define REG_BBREG6	0x3E  /* */
+#define REG_CCAEDTH	0x3F  /* Energy Detection Threshold */
 
 /* MRF24J40 Long Address Registers */
-#define REG_RFCON0     0x200  /* RF Control Registers */
-#define REG_RFCON1     0x201
-#define REG_RFCON2     0x202
-#define REG_RFCON3     0x203
-#define REG_RFCON5     0x205
-#define REG_RFCON6     0x206
-#define REG_RFCON7     0x207
-#define REG_RFCON8     0x208
-#define REG_RSSI       0x210
-#define REG_SLPCON0    0x211  /* Sleep Clock Control Registers */
-#define REG_SLPCON1    0x220
-#define REG_WAKETIMEL  0x222  /* Wake-up Time Match Value Low */
-#define REG_WAKETIMEH  0x223  /* Wake-up Time Match Value High */
-#define REG_TESTMODE   0x22F  /* Test mode */
-#define REG_RX_FIFO    0x300  /* Receive FIFO */
+#define REG_RFCON0	0x200  /* RF Control Registers */
+#define RFCON0_CH_SHIFT	4
+#define RFCON0_CH_MASK	0xf0
+#define RFOPT_RECOMMEND	3
+
+#define REG_RFCON1	0x201
+#define REG_RFCON2	0x202
+#define REG_RFCON3	0x203
+
+#define TXPWRL_MASK	0xc0
+#define TXPWRL_SHIFT	6
+#define TXPWRL_30	0x3
+#define TXPWRL_20	0x2
+#define TXPWRL_10	0x1
+#define TXPWRL_0	0x0
+
+#define TXPWRS_MASK	0x38
+#define TXPWRS_SHIFT	3
+#define TXPWRS_6_3	0x7
+#define TXPWRS_4_9	0x6
+#define TXPWRS_3_7	0x5
+#define TXPWRS_2_8	0x4
+#define TXPWRS_1_9	0x3
+#define TXPWRS_1_2	0x2
+#define TXPWRS_0_5	0x1
+#define TXPWRS_0	0x0
+
+#define REG_RFCON5	0x205
+#define REG_RFCON6	0x206
+#define REG_RFCON7	0x207
+#define REG_RFCON8	0x208
+#define REG_SLPCAL0	0x209
+#define REG_SLPCAL1	0x20A
+#define REG_SLPCAL2	0x20B
+#define REG_RFSTATE	0x20F
+#define REG_RSSI	0x210
+#define REG_SLPCON0	0x211  /* Sleep Clock Control Registers */
+#define BIT_INTEDGE	BIT(1)
+
+#define REG_SLPCON1	0x220
+#define REG_WAKETIMEL	0x222  /* Wake-up Time Match Value Low */
+#define REG_WAKETIMEH	0x223  /* Wake-up Time Match Value High */
+#define REG_REMCNTL	0x224
+#define REG_REMCNTH	0x225
+#define REG_MAINCNT0	0x226
+#define REG_MAINCNT1	0x227
+#define REG_MAINCNT2	0x228
+#define REG_MAINCNT3	0x229
+#define REG_TESTMODE	0x22F  /* Test mode */
+#define REG_ASSOEAR0	0x230
+#define REG_ASSOEAR1	0x231
+#define REG_ASSOEAR2	0x232
+#define REG_ASSOEAR3	0x233
+#define REG_ASSOEAR4	0x234
+#define REG_ASSOEAR5	0x235
+#define REG_ASSOEAR6	0x236
+#define REG_ASSOEAR7	0x237
+#define REG_ASSOSAR0	0x238
+#define REG_ASSOSAR1	0x239
+#define REG_UNONCE0	0x240
+#define REG_UNONCE1	0x241
+#define REG_UNONCE2	0x242
+#define REG_UNONCE3	0x243
+#define REG_UNONCE4	0x244
+#define REG_UNONCE5	0x245
+#define REG_UNONCE6	0x246
+#define REG_UNONCE7	0x247
+#define REG_UNONCE8	0x248
+#define REG_UNONCE9	0x249
+#define REG_UNONCE10	0x24A
+#define REG_UNONCE11	0x24B
+#define REG_UNONCE12	0x24C
+#define REG_RX_FIFO	0x300  /* Receive FIFO */
 
 /* Device configuration: Only channels 11-26 on page 0 are supported. */
 #define MRF24J40_CHAN_MIN 11
@@ -81,11 +202,52 @@
 	struct spi_device *spi;
 	struct ieee802154_hw *hw;
 
-	struct mutex buffer_mutex; /* only used to protect buf */
-	struct completion tx_complete;
-	u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
+	struct regmap *regmap_short;
+	struct regmap *regmap_long;
+
+	/* for writing txfifo */
+	struct spi_message tx_msg;
+	u8 tx_hdr_buf[2];
+	struct spi_transfer tx_hdr_trx;
+	u8 tx_len_buf[2];
+	struct spi_transfer tx_len_trx;
+	struct spi_transfer tx_buf_trx;
+	struct sk_buff *tx_skb;
+
+	/* post-transmit message to send the frame out */
+	struct spi_message tx_post_msg;
+	u8 tx_post_buf[2];
+	struct spi_transfer tx_post_trx;
+
+	/* for protecting/unprotecting the rxfifo and reading its length */
+	struct spi_message rx_msg;
+	u8 rx_buf[3];
+	struct spi_transfer rx_trx;
+
+	/* receive handling */
+	struct spi_message rx_buf_msg;
+	u8 rx_addr_buf[2];
+	struct spi_transfer rx_addr_trx;
+	u8 rx_lqi_buf[2];
+	struct spi_transfer rx_lqi_trx;
+	u8 rx_fifo_buf[RX_FIFO_SIZE];
+	struct spi_transfer rx_fifo_buf_trx;
+
+	/* isr handling for reading intstat */
+	struct spi_message irq_msg;
+	u8 irq_buf[2];
+	struct spi_transfer irq_trx;
 };
 
+/* regmap information for short address register access */
+#define MRF24J40_SHORT_WRITE	0x01
+#define MRF24J40_SHORT_READ	0x00
+#define MRF24J40_SHORT_NUMREGS	0x3F
+
+/* regmap information for long address register access */
+#define MRF24J40_LONG_ACCESS	0x80
+#define MRF24J40_LONG_NUMREGS	0x38F
+
 /* Read/Write SPI Commands for Short and Long Address registers. */
 #define MRF24J40_READSHORT(reg) ((reg) << 1)
 #define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1)
@@ -97,118 +259,304 @@
 
 #define printdev(X) (&X->spi->dev)
 
-static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value)
+static bool
+mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg)
 {
-	int ret;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = MRF24J40_WRITESHORT(reg);
-	devrec->buf[1] = value;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI write Failed for short register 0x%hhx\n", reg);
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	switch (reg) {
+	case REG_RXMCR:
+	case REG_PANIDL:
+	case REG_PANIDH:
+	case REG_SADRL:
+	case REG_SADRH:
+	case REG_EADR0:
+	case REG_EADR1:
+	case REG_EADR2:
+	case REG_EADR3:
+	case REG_EADR4:
+	case REG_EADR5:
+	case REG_EADR6:
+	case REG_EADR7:
+	case REG_RXFLUSH:
+	case REG_ORDER:
+	case REG_TXMCR:
+	case REG_ACKTMOUT:
+	case REG_ESLOTG1:
+	case REG_SYMTICKL:
+	case REG_SYMTICKH:
+	case REG_PACON0:
+	case REG_PACON1:
+	case REG_PACON2:
+	case REG_TXBCON0:
+	case REG_TXNCON:
+	case REG_TXG1CON:
+	case REG_TXG2CON:
+	case REG_ESLOTG23:
+	case REG_ESLOTG45:
+	case REG_ESLOTG67:
+	case REG_TXPEND:
+	case REG_WAKECON:
+	case REG_FROMOFFSET:
+	case REG_TXBCON1:
+	case REG_GATECLK:
+	case REG_TXTIME:
+	case REG_HSYMTMRL:
+	case REG_HSYMTMRH:
+	case REG_SOFTRST:
+	case REG_SECCON0:
+	case REG_SECCON1:
+	case REG_TXSTBL:
+	case REG_RXSR:
+	case REG_INTCON:
+	case REG_TRISGPIO:
+	case REG_GPIO:
+	case REG_RFCTL:
+	case REG_SLPACK:
+	case REG_BBREG0:
+	case REG_BBREG1:
+	case REG_BBREG2:
+	case REG_BBREG3:
+	case REG_BBREG4:
+	case REG_BBREG6:
+	case REG_CCAEDTH:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val)
+static bool
+mrf24j40_short_reg_readable(struct device *dev, unsigned int reg)
 {
-	int ret = -1;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
+	bool rc;
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
+	/* all writeable are also readable */
+	rc = mrf24j40_short_reg_writeable(dev, reg);
+	if (rc)
+		return rc;
 
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = MRF24J40_READSHORT(reg);
-	devrec->buf[1] = 0;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI read Failed for short register 0x%hhx\n", reg);
-	else
-		*val = devrec->buf[1];
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	/* readonly regs */
+	switch (reg) {
+	case REG_TXSTAT:
+	case REG_INTSTAT:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value)
+static bool
+mrf24j40_short_reg_volatile(struct device *dev, unsigned int reg)
 {
-	int ret;
-	u16 cmd;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 3,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	cmd = MRF24J40_READLONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	devrec->buf[2] = 0;
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret)
-		dev_err(printdev(devrec),
-			"SPI read Failed for long register 0x%hx\n", reg);
-	else
-		*value = devrec->buf[2];
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+	/* can be changed during runtime */
+	switch (reg) {
+	case REG_TXSTAT:
+	case REG_INTSTAT:
+	case REG_RXFLUSH:
+	case REG_TXNCON:
+	case REG_SOFTRST:
+	case REG_RFCTL:
+	case REG_TXBCON0:
+	case REG_TXG1CON:
+	case REG_TXG2CON:
+	case REG_TXBCON1:
+	case REG_SECCON0:
+	case REG_RXSR:
+	case REG_SLPACK:
+	case REG_SECCR2:
+	case REG_BBREG6:
+	/* used by both spi_async and regmap, so treat it as volatile */
+	case REG_BBREG1:
+		return true;
+	default:
+		return false;
+	}
 }
 
-static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val)
+static bool
+mrf24j40_short_reg_precious(struct device *dev, unsigned int reg)
 {
+	/* don't clear irq line on read */
+	switch (reg) {
+	case REG_INTSTAT:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config mrf24j40_short_regmap = {
+	.name = "mrf24j40_short",
+	.reg_bits = 7,
+	.val_bits = 8,
+	.pad_bits = 1,
+	.write_flag_mask = MRF24J40_SHORT_WRITE,
+	.read_flag_mask = MRF24J40_SHORT_READ,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = MRF24J40_SHORT_NUMREGS,
+	.writeable_reg = mrf24j40_short_reg_writeable,
+	.readable_reg = mrf24j40_short_reg_readable,
+	.volatile_reg = mrf24j40_short_reg_volatile,
+	.precious_reg = mrf24j40_short_reg_precious,
+};
+
+static bool
+mrf24j40_long_reg_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case REG_RFCON0:
+	case REG_RFCON1:
+	case REG_RFCON2:
+	case REG_RFCON3:
+	case REG_RFCON5:
+	case REG_RFCON6:
+	case REG_RFCON7:
+	case REG_RFCON8:
+	case REG_SLPCAL2:
+	case REG_SLPCON0:
+	case REG_SLPCON1:
+	case REG_WAKETIMEL:
+	case REG_WAKETIMEH:
+	case REG_REMCNTL:
+	case REG_REMCNTH:
+	case REG_MAINCNT0:
+	case REG_MAINCNT1:
+	case REG_MAINCNT2:
+	case REG_MAINCNT3:
+	case REG_TESTMODE:
+	case REG_ASSOEAR0:
+	case REG_ASSOEAR1:
+	case REG_ASSOEAR2:
+	case REG_ASSOEAR3:
+	case REG_ASSOEAR4:
+	case REG_ASSOEAR5:
+	case REG_ASSOEAR6:
+	case REG_ASSOEAR7:
+	case REG_ASSOSAR0:
+	case REG_ASSOSAR1:
+	case REG_UNONCE0:
+	case REG_UNONCE1:
+	case REG_UNONCE2:
+	case REG_UNONCE3:
+	case REG_UNONCE4:
+	case REG_UNONCE5:
+	case REG_UNONCE6:
+	case REG_UNONCE7:
+	case REG_UNONCE8:
+	case REG_UNONCE9:
+	case REG_UNONCE10:
+	case REG_UNONCE11:
+	case REG_UNONCE12:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool
+mrf24j40_long_reg_readable(struct device *dev, unsigned int reg)
+{
+	bool rc;
+
+	/* all writeable are also readable */
+	rc = mrf24j40_long_reg_writeable(dev, reg);
+	if (rc)
+		return rc;
+
+	/* readonly regs */
+	switch (reg) {
+	case REG_SLPCAL0:
+	case REG_SLPCAL1:
+	case REG_RFSTATE:
+	case REG_RSSI:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool
+mrf24j40_long_reg_volatile(struct device *dev, unsigned int reg)
+{
+	/* can be changed during runtime */
+	switch (reg) {
+	case REG_SLPCAL0:
+	case REG_SLPCAL1:
+	case REG_SLPCAL2:
+	case REG_RFSTATE:
+	case REG_RSSI:
+	case REG_MAINCNT3:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config mrf24j40_long_regmap = {
+	.name = "mrf24j40_long",
+	.reg_bits = 11,
+	.val_bits = 8,
+	.pad_bits = 5,
+	.write_flag_mask = MRF24J40_LONG_ACCESS,
+	.read_flag_mask = MRF24J40_LONG_ACCESS,
+	.cache_type = REGCACHE_RBTREE,
+	.max_register = MRF24J40_LONG_NUMREGS,
+	.writeable_reg = mrf24j40_long_reg_writeable,
+	.readable_reg = mrf24j40_long_reg_readable,
+	.volatile_reg = mrf24j40_long_reg_volatile,
+};
+
+static int mrf24j40_long_regmap_write(void *context, const void *data,
+				      size_t count)
+{
+	struct spi_device *spi = context;
+	u8 buf[3];
+
+	if (count > 3)
+		return -EINVAL;
+
+	/* regmap supports a read/write mask in the first byte only.
+	 * A long write access needs to set the 12th bit, so we
+	 * handle writes specially here.
+	 */
+	memcpy(buf, data, count);
+	buf[1] |= (1 << 4);
+
+	return spi_write(spi, buf, count);
+}
+
+static int
+mrf24j40_long_regmap_read(void *context, const void *reg, size_t reg_size,
+			  void *val, size_t val_size)
+{
+	struct spi_device *spi = context;
+
+	return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static const struct regmap_bus mrf24j40_long_regmap_bus = {
+	.write = mrf24j40_long_regmap_write,
+	.read = mrf24j40_long_regmap_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
+	.val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static void write_tx_buf_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	__le16 fc = ieee802154_get_fc_from_skb(devrec->tx_skb);
+	u8 val = BIT_TXNTRIG;
 	int ret;
-	u16 cmd;
-	struct spi_message msg;
-	struct spi_transfer xfer = {
-		.len = 3,
-		.tx_buf = devrec->buf,
-		.rx_buf = devrec->buf,
-	};
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
+	if (ieee802154_is_ackreq(fc))
+		val |= BIT_TXNACKREQ;
 
-	cmd = MRF24J40_WRITELONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	devrec->buf[2] = val;
+	devrec->tx_post_msg.complete = NULL;
+	devrec->tx_post_buf[0] = MRF24J40_WRITESHORT(REG_TXNCON);
+	devrec->tx_post_buf[1] = val;
 
-	ret = spi_sync(devrec->spi, &msg);
+	ret = spi_async(devrec->spi, &devrec->tx_post_msg);
 	if (ret)
-		dev_err(printdev(devrec),
-			"SPI write Failed for long register 0x%hx\n", reg);
-
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
+		dev_err(printdev(devrec), "SPI write Failed for transmit buf\n");
 }
 
 /* This function relies on an undocumented write method. Once a write command
@@ -217,22 +565,8 @@
 static int write_tx_buf(struct mrf24j40 *devrec, u16 reg,
 			const u8 *data, size_t length)
 {
-	int ret;
 	u16 cmd;
-	u8 lengths[2];
-	struct spi_message msg;
-	struct spi_transfer addr_xfer = {
-		.len = 2,
-		.tx_buf = devrec->buf,
-	};
-	struct spi_transfer lengths_xfer = {
-		.len = 2,
-		.tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */
-	};
-	struct spi_transfer data_xfer = {
-		.len = length,
-		.tx_buf = data,
-	};
+	int ret;
 
 	/* Range check the length. 2 bytes are used for the length fields.*/
 	if (length > TX_FIFO_SIZE-2) {
@@ -240,147 +574,29 @@
 		length = TX_FIFO_SIZE-2;
 	}
 
-	spi_message_init(&msg);
-	spi_message_add_tail(&addr_xfer, &msg);
-	spi_message_add_tail(&lengths_xfer, &msg);
-	spi_message_add_tail(&data_xfer, &msg);
-
 	cmd = MRF24J40_WRITELONG(reg);
-	mutex_lock(&devrec->buffer_mutex);
-	devrec->buf[0] = cmd >> 8 & 0xff;
-	devrec->buf[1] = cmd & 0xff;
-	lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
-	lengths[1] = length; /* Total length */
+	devrec->tx_hdr_buf[0] = cmd >> 8 & 0xff;
+	devrec->tx_hdr_buf[1] = cmd & 0xff;
+	devrec->tx_len_buf[0] = 0x0; /* Header Length. Set to 0 for now. TODO */
+	devrec->tx_len_buf[1] = length; /* Total length */
+	devrec->tx_buf_trx.tx_buf = data;
+	devrec->tx_buf_trx.len = length;
 
-	ret = spi_sync(devrec->spi, &msg);
+	ret = spi_async(devrec->spi, &devrec->tx_msg);
 	if (ret)
 		dev_err(printdev(devrec), "SPI write Failed for TX buf\n");
 
-	mutex_unlock(&devrec->buffer_mutex);
-	return ret;
-}
-
-static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec,
-				u8 *data, u8 *len, u8 *lqi)
-{
-	u8 rx_len;
-	u8 addr[2];
-	u8 lqi_rssi[2];
-	u16 cmd;
-	int ret;
-	struct spi_message msg;
-	struct spi_transfer addr_xfer = {
-		.len = 2,
-		.tx_buf = &addr,
-	};
-	struct spi_transfer data_xfer = {
-		.len = 0x0, /* set below */
-		.rx_buf = data,
-	};
-	struct spi_transfer status_xfer = {
-		.len = 2,
-		.rx_buf = &lqi_rssi,
-	};
-
-	/* Get the length of the data in the RX FIFO. The length in this
-	 * register exclues the 1-byte length field at the beginning. */
-	ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len);
-	if (ret)
-		goto out;
-
-	/* Range check the RX FIFO length, accounting for the one-byte
-	 * length field at the beginning. */
-	if (rx_len > RX_FIFO_SIZE-1) {
-		dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n");
-		rx_len = RX_FIFO_SIZE-1;
-	}
-
-	if (rx_len > *len) {
-		/* Passed in buffer wasn't big enough. Should never happen. */
-		dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n");
-		rx_len = *len;
-	}
-
-	/* Set up the commands to read the data. */
-	cmd = MRF24J40_READLONG(REG_RX_FIFO+1);
-	addr[0] = cmd >> 8 & 0xff;
-	addr[1] = cmd & 0xff;
-	data_xfer.len = rx_len;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&addr_xfer, &msg);
-	spi_message_add_tail(&data_xfer, &msg);
-	spi_message_add_tail(&status_xfer, &msg);
-
-	ret = spi_sync(devrec->spi, &msg);
-	if (ret) {
-		dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n");
-		goto out;
-	}
-
-	*lqi = lqi_rssi[0];
-	*len = rx_len;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ",
-		       DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0);
-	pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
-		 lqi_rssi[0], lqi_rssi[1]);
-#endif
-
-out:
 	return ret;
 }
 
 static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret = 0;
 
 	dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len);
+	devrec->tx_skb = skb;
 
-	ret = write_tx_buf(devrec, 0x000, skb->data, skb->len);
-	if (ret)
-		goto err;
-
-	reinit_completion(&devrec->tx_complete);
-
-	/* Set TXNTRIG bit of TXNCON to send packet */
-	ret = read_short_reg(devrec, REG_TXNCON, &val);
-	if (ret)
-		goto err;
-	val |= 0x1;
-	/* Set TXNACKREQ if the ACK bit is set in the packet. */
-	if (skb->data[0] & IEEE802154_FC_ACK_REQ)
-		val |= 0x4;
-	write_short_reg(devrec, REG_TXNCON, val);
-
-	/* Wait for the device to send the TX complete interrupt. */
-	ret = wait_for_completion_interruptible_timeout(
-						&devrec->tx_complete,
-						5 * HZ);
-	if (ret == -ERESTARTSYS)
-		goto err;
-	if (ret == 0) {
-		dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
-		ret = -ETIMEDOUT;
-		goto err;
-	}
-
-	/* Check for send error from the device. */
-	ret = read_short_reg(devrec, REG_TXSTAT, &val);
-	if (ret)
-		goto err;
-	if (val & 0x1) {
-		dev_dbg(printdev(devrec), "Error Sending. Retry count exceeded\n");
-		ret = -ECOMM; /* TODO: Better error code ? */
-	} else
-		dev_dbg(printdev(devrec), "Packet Sent\n");
-
-err:
-
-	return ret;
+	return write_tx_buf(devrec, 0x000, skb->data, skb->len);
 }
 
 static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level)
@@ -394,33 +610,23 @@
 static int mrf24j40_start(struct ieee802154_hw *hw)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret;
 
 	dev_dbg(printdev(devrec), "start\n");
 
-	ret = read_short_reg(devrec, REG_INTCON, &val);
-	if (ret)
-		return ret;
-	val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */
-	write_short_reg(devrec, REG_INTCON, val);
-
-	return 0;
+	/* Clear TXNIE and RXIE. Enable interrupts */
+	return regmap_update_bits(devrec->regmap_short, REG_INTCON,
+				  BIT_TXNIE | BIT_RXIE, 0);
 }
 
 static void mrf24j40_stop(struct ieee802154_hw *hw)
 {
 	struct mrf24j40 *devrec = hw->priv;
-	u8 val;
-	int ret;
 
 	dev_dbg(printdev(devrec), "stop\n");
 
-	ret = read_short_reg(devrec, REG_INTCON, &val);
-	if (ret)
-		return;
-	val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */
-	write_short_reg(devrec, REG_INTCON, val);
+	/* Set TXNIE and RXIE. Disable Interrupts */
+	regmap_update_bits(devrec->regmap_short, REG_INTCON,
+			   BIT_TXNIE | BIT_RXIE, BIT_TXNIE | BIT_RXIE);
 }
 
 static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
@@ -436,21 +642,23 @@
 	WARN_ON(channel > MRF24J40_CHAN_MAX);
 
 	/* Set Channel TODO */
-	val = (channel-11) << 4 | 0x03;
-	write_long_reg(devrec, REG_RFCON0, val);
-
-	/* RF Reset */
-	ret = read_short_reg(devrec, REG_RFCTL, &val);
+	val = (channel - 11) << RFCON0_CH_SHIFT | RFOPT_RECOMMEND;
+	ret = regmap_update_bits(devrec->regmap_long, REG_RFCON0,
+				 RFCON0_CH_MASK, val);
 	if (ret)
 		return ret;
-	val |= 0x04;
-	write_short_reg(devrec, REG_RFCTL, val);
-	val &= ~0x04;
-	write_short_reg(devrec, REG_RFCTL, val);
 
-	udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+	/* RF Reset */
+	ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST,
+				 BIT_RFRST);
+	if (ret)
+		return ret;
 
-	return 0;
+	ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, 0);
+	if (!ret)
+		udelay(SET_CHANNEL_DELAY_US); /* per datasheet */
+
+	return ret;
 }
 
 static int mrf24j40_filter(struct ieee802154_hw *hw,
@@ -468,8 +676,8 @@
 		addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
 		addrl = le16_to_cpu(filt->short_addr) & 0xff;
 
-		write_short_reg(devrec, REG_SADRH, addrh);
-		write_short_reg(devrec, REG_SADRL, addrl);
+		regmap_write(devrec->regmap_short, REG_SADRH, addrh);
+		regmap_write(devrec->regmap_short, REG_SADRL, addrl);
 		dev_dbg(printdev(devrec),
 			"Set short addr to %04hx\n", filt->short_addr);
 	}
@@ -480,7 +688,8 @@
 
 		memcpy(addr, &filt->ieee_addr, 8);
 		for (i = 0; i < 8; i++)
-			write_short_reg(devrec, REG_EADR0 + i, addr[i]);
+			regmap_write(devrec->regmap_short, REG_EADR0 + i,
+				     addr[i]);
 
 #ifdef DEBUG
 		pr_debug("Set long addr to: ");
@@ -496,8 +705,8 @@
 
 		panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
 		panidl = le16_to_cpu(filt->pan_id) & 0xff;
-		write_short_reg(devrec, REG_PANIDH, panidh);
-		write_short_reg(devrec, REG_PANIDL, panidl);
+		regmap_write(devrec->regmap_short, REG_PANIDH, panidh);
+		regmap_write(devrec->regmap_short, REG_PANIDL, panidl);
 
 		dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id);
 	}
@@ -507,14 +716,14 @@
 		u8 val;
 		int ret;
 
-		ret = read_short_reg(devrec, REG_RXMCR, &val);
+		if (filt->pan_coord)
+			val = BIT_PANCOORD;
+		else
+			val = 0;
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PANCOORD, val);
 		if (ret)
 			return ret;
-		if (filt->pan_coord)
-			val |= 0x8;
-		else
-			val &= ~0x8;
-		write_short_reg(devrec, REG_RXMCR, val);
 
 		/* REG_SLOTTED is maintained as default (unslotted/CSMA-CA).
 		 * REG_ORDER is maintained as default (no beacon/superframe).
@@ -527,168 +736,392 @@
 	return 0;
 }
 
-static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+static void mrf24j40_handle_rx_read_buf_unlock(struct mrf24j40 *devrec)
 {
-	u8 len = RX_FIFO_SIZE;
-	u8 lqi = 0;
-	u8 val;
-	int ret = 0;
-	int ret2;
+	int ret;
+
+	/* Turn back on reception of packets off the air. */
+	devrec->rx_msg.complete = NULL;
+	devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1);
+	devrec->rx_buf[1] = 0x00; /* CLR RXDECINV */
+	ret = spi_async(devrec->spi, &devrec->rx_msg);
+	if (ret)
+		dev_err(printdev(devrec), "failed to unlock rx buffer\n");
+}
+
+static void mrf24j40_handle_rx_read_buf_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u8 len = devrec->rx_buf[2];
+	u8 rx_local_buf[RX_FIFO_SIZE];
 	struct sk_buff *skb;
 
-	/* Turn off reception of packets off the air. This prevents the
-	 * device from overwriting the buffer while we're reading it. */
-	ret = read_short_reg(devrec, REG_BBREG1, &val);
-	if (ret)
-		goto out;
-	val |= 4; /* SET RXDECINV */
-	write_short_reg(devrec, REG_BBREG1, val);
+	memcpy(rx_local_buf, devrec->rx_fifo_buf, len);
+	mrf24j40_handle_rx_read_buf_unlock(devrec);
 
-	skb = dev_alloc_skb(len);
+	skb = dev_alloc_skb(IEEE802154_MTU);
 	if (!skb) {
-		ret = -ENOMEM;
-		goto out;
+		dev_err(printdev(devrec), "failed to allocate skb\n");
+		return;
 	}
 
-	ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi);
-	if (ret < 0) {
-		dev_err(printdev(devrec), "Failure reading RX FIFO\n");
-		kfree_skb(skb);
-		ret = -EINVAL;
-		goto out;
+	memcpy(skb_put(skb, len), rx_local_buf, len);
+	ieee802154_rx_irqsafe(devrec->hw, skb, 0);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       rx_local_buf, len, 0);
+	pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n",
+		 devrec->rx_lqi_buf[0], devrec->rx_lqi_buf[1]);
+#endif
+}
+
+static void mrf24j40_handle_rx_read_buf(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u16 cmd;
+	int ret;
+
+	/* if length is invalid read the full MTU */
+	if (!ieee802154_is_valid_psdu_len(devrec->rx_buf[2]))
+		devrec->rx_buf[2] = IEEE802154_MTU;
+
+	cmd = MRF24J40_READLONG(REG_RX_FIFO + 1);
+	devrec->rx_addr_buf[0] = cmd >> 8 & 0xff;
+	devrec->rx_addr_buf[1] = cmd & 0xff;
+	devrec->rx_fifo_buf_trx.len = devrec->rx_buf[2];
+	ret = spi_async(devrec->spi, &devrec->rx_buf_msg);
+	if (ret) {
+		dev_err(printdev(devrec), "failed to read rx buffer\n");
+		mrf24j40_handle_rx_read_buf_unlock(devrec);
+	}
+}
+
+static void mrf24j40_handle_rx_read_len(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u16 cmd;
+	int ret;
+
+	/* read the length of received frame */
+	devrec->rx_msg.complete = mrf24j40_handle_rx_read_buf;
+	devrec->rx_trx.len = 3;
+	cmd = MRF24J40_READLONG(REG_RX_FIFO);
+	devrec->rx_buf[0] = cmd >> 8 & 0xff;
+	devrec->rx_buf[1] = cmd & 0xff;
+
+	ret = spi_async(devrec->spi, &devrec->rx_msg);
+	if (ret) {
+		dev_err(printdev(devrec), "failed to read rx buffer length\n");
+		mrf24j40_handle_rx_read_buf_unlock(devrec);
+	}
+}
+
+static int mrf24j40_handle_rx(struct mrf24j40 *devrec)
+{
+	/* Turn off reception of packets off the air. This prevents the
+	 * device from overwriting the buffer while we're reading it.
+	 */
+	devrec->rx_msg.complete = mrf24j40_handle_rx_read_len;
+	devrec->rx_trx.len = 2;
+	devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1);
+	devrec->rx_buf[1] = BIT_RXDECINV; /* SET RXDECINV */
+
+	return spi_async(devrec->spi, &devrec->rx_msg);
+}
+
+static int
+mrf24j40_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be,
+		     u8 retries)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	u8 val;
+
+	/* min_be */
+	val = min_be << TXMCR_MIN_BE_SHIFT;
+	/* csma backoffs */
+	val |= retries << TXMCR_CSMA_RETRIES_SHIFT;
+
+	return regmap_update_bits(devrec->regmap_short, REG_TXMCR,
+				  TXMCR_MIN_BE_MASK | TXMCR_CSMA_RETRIES_MASK,
+				  val);
+}
+
+static int mrf24j40_set_cca_mode(struct ieee802154_hw *hw,
+				 const struct wpan_phy_cca *cca)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	u8 val;
+
+	/* mapping 802.15.4 to driver spec */
+	switch (cca->mode) {
+	case NL802154_CCA_ENERGY:
+		val = 2;
+		break;
+	case NL802154_CCA_CARRIER:
+		val = 1;
+		break;
+	case NL802154_CCA_ENERGY_CARRIER:
+		switch (cca->opt) {
+		case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+			val = 3;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	/* Cut off the checksum */
-	skb_trim(skb, len-2);
+	return regmap_update_bits(devrec->regmap_short, REG_BBREG2,
+				  BBREG2_CCA_MODE_MASK,
+				  val << BBREG2_CCA_MODE_SHIFT);
+}
 
-	/* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040,
-	 * also from a workqueue).  I think irqsafe is not necessary here.
-	 * Can someone confirm? */
-	ieee802154_rx_irqsafe(devrec->hw, skb, lqi);
+/* array for representing ed levels */
+static const s32 mrf24j40_ed_levels[] = {
+	-9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100,
+	-8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100,
+	-7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100,
+	-6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100,
+	-5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100,
+	-4000, -3900, -3800, -3700, -3600, -3500
+};
 
-	dev_dbg(printdev(devrec), "RX Handled\n");
+/* map ed levels to register value */
+static const s32 mrf24j40_ed_levels_map[][2] = {
+	{ -9000, 0 }, { -8900, 1 }, { -8800, 2 }, { -8700, 5 }, { -8600, 9 },
+	{ -8500, 13 }, { -8400, 18 }, { -8300, 23 }, { -8200, 27 },
+	{ -8100, 32 }, { -8000, 37 }, { -7900, 43 }, { -7800, 48 },
+	{ -7700, 53 }, { -7600, 58 }, { -7500, 63 }, { -7400, 68 },
+	{ -7300, 73 }, { -7200, 78 }, { -7100, 83 }, { -7000, 89 },
+	{ -6900, 95 }, { -6800, 100 }, { -6700, 107 }, { -6600, 111 },
+	{ -6500, 117 }, { -6400, 121 }, { -6300, 125 }, { -6200, 129 },
+	{ -6100, 133 },	{ -6000, 138 }, { -5900, 143 }, { -5800, 148 },
+	{ -5700, 153 }, { -5600, 159 },	{ -5500, 165 }, { -5400, 170 },
+	{ -5300, 176 }, { -5200, 183 }, { -5100, 188 }, { -5000, 193 },
+	{ -4900, 198 }, { -4800, 203 }, { -4700, 207 }, { -4600, 212 },
+	{ -4500, 216 }, { -4400, 221 }, { -4300, 225 }, { -4200, 228 },
+	{ -4100, 233 }, { -4000, 239 }, { -3900, 245 }, { -3800, 250 },
+	{ -3700, 253 }, { -3600, 254 }, { -3500, 255 },
+};
 
-out:
-	/* Turn back on reception of packets off the air. */
-	ret2 = read_short_reg(devrec, REG_BBREG1, &val);
-	if (ret2)
-		return ret2;
-	val &= ~0x4; /* Clear RXDECINV */
-	write_short_reg(devrec, REG_BBREG1, val);
+static int mrf24j40_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mrf24j40_ed_levels_map); i++) {
+		if (mrf24j40_ed_levels_map[i][0] == mbm)
+			return regmap_write(devrec->regmap_short, REG_CCAEDTH,
+					    mrf24j40_ed_levels_map[i][1]);
+	}
+
+	return -EINVAL;
+}
+
+static const s32 mrf24j40ma_powers[] = {
+	0, -50, -120, -190, -280, -370, -490, -630, -1000, -1050, -1120, -1190,
+	-1280, -1370, -1490, -1630, -2000, -2050, -2120, -2190, -2280, -2370,
+	-2490, -2630, -3000, -3050, -3120, -3190, -3280, -3370, -3490, -3630,
+};
+
+static int mrf24j40_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	s32 small_scale;
+	u8 val;
+
+	if (0 >= mbm && mbm > -1000) {
+		val = TXPWRL_0 << TXPWRL_SHIFT;
+		small_scale = mbm;
+	} else if (-1000 >= mbm && mbm > -2000) {
+		val = TXPWRL_10 << TXPWRL_SHIFT;
+		small_scale = mbm + 1000;
+	} else if (-2000 >= mbm && mbm > -3000) {
+		val = TXPWRL_20 << TXPWRL_SHIFT;
+		small_scale = mbm + 2000;
+	} else if (-3000 >= mbm && mbm > -4000) {
+		val = TXPWRL_30 << TXPWRL_SHIFT;
+		small_scale = mbm + 3000;
+	} else {
+		return -EINVAL;
+	}
+
+	switch (small_scale) {
+	case 0:
+		val |= (TXPWRS_0 << TXPWRS_SHIFT);
+		break;
+	case -50:
+		val |= (TXPWRS_0_5 << TXPWRS_SHIFT);
+		break;
+	case -120:
+		val |= (TXPWRS_1_2 << TXPWRS_SHIFT);
+		break;
+	case -190:
+		val |= (TXPWRS_1_9 << TXPWRS_SHIFT);
+		break;
+	case -280:
+		val |= (TXPWRS_2_8 << TXPWRS_SHIFT);
+		break;
+	case -370:
+		val |= (TXPWRS_3_7 << TXPWRS_SHIFT);
+		break;
+	case -490:
+		val |= (TXPWRS_4_9 << TXPWRS_SHIFT);
+		break;
+	case -630:
+		val |= (TXPWRS_6_3 << TXPWRS_SHIFT);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return regmap_update_bits(devrec->regmap_long, REG_RFCON3,
+				  TXPWRL_MASK | TXPWRS_MASK, val);
+}
+
+static int mrf24j40_set_promiscuous_mode(struct ieee802154_hw *hw, bool on)
+{
+	struct mrf24j40 *devrec = hw->priv;
+	int ret;
+
+	if (on) {
+		/* set PROMI, ERRPKT and NOACKRSP */
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP);
+	} else {
+		/* clear PROMI, ERRPKT and NOACKRSP */
+		ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR,
+					 BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP,
+					 0);
+	}
 
 	return ret;
 }
 
 static const struct ieee802154_ops mrf24j40_ops = {
 	.owner = THIS_MODULE,
-	.xmit_sync = mrf24j40_tx,
+	.xmit_async = mrf24j40_tx,
 	.ed = mrf24j40_ed,
 	.start = mrf24j40_start,
 	.stop = mrf24j40_stop,
 	.set_channel = mrf24j40_set_channel,
 	.set_hw_addr_filt = mrf24j40_filter,
+	.set_csma_params = mrf24j40_csma_params,
+	.set_cca_mode = mrf24j40_set_cca_mode,
+	.set_cca_ed_level = mrf24j40_set_cca_ed_level,
+	.set_txpower = mrf24j40_set_txpower,
+	.set_promiscuous_mode = mrf24j40_set_promiscuous_mode,
 };
 
+static void mrf24j40_intstat_complete(void *context)
+{
+	struct mrf24j40 *devrec = context;
+	u8 intstat = devrec->irq_buf[1];
+
+	enable_irq(devrec->spi->irq);
+
+	/* Check for TX complete */
+	if (intstat & BIT_TXNIF)
+		ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
+
+	/* Check for Rx */
+	if (intstat & BIT_RXIF)
+		mrf24j40_handle_rx(devrec);
+}
+
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
 	struct mrf24j40 *devrec = data;
-	u8 intstat;
 	int ret;
 
+	disable_irq_nosync(irq);
+
+	devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT);
 	/* Read the interrupt status */
-	ret = read_short_reg(devrec, REG_INTSTAT, &intstat);
-	if (ret)
-		goto out;
+	ret = spi_async(devrec->spi, &devrec->irq_msg);
+	if (ret) {
+		enable_irq(irq);
+		return IRQ_NONE;
+	}
 
-	/* Check for TX complete */
-	if (intstat & 0x1)
-		complete(&devrec->tx_complete);
-
-	/* Check for Rx */
-	if (intstat & 0x8)
-		mrf24j40_handle_rx(devrec);
-
-out:
 	return IRQ_HANDLED;
 }
 
 static int mrf24j40_hw_init(struct mrf24j40 *devrec)
 {
+	u32 irq_type;
 	int ret;
-	u8 val;
 
 	/* Initialize the device.
 		From datasheet section 3.2: Initialization. */
-	ret = write_short_reg(devrec, REG_SOFTRST, 0x07);
+	ret = regmap_write(devrec->regmap_short, REG_SOFTRST, 0x07);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_PACON2, 0x98);
+	ret = regmap_write(devrec->regmap_short, REG_PACON2, 0x98);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_TXSTBL, 0x95);
+	ret = regmap_write(devrec->regmap_short, REG_TXSTBL, 0x95);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON0, 0x03);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON0, 0x03);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON1, 0x01);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON1, 0x01);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON2, 0x80);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON2, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON6, 0x90);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON6, 0x90);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON7, 0x80);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON7, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_RFCON8, 0x10);
+	ret = regmap_write(devrec->regmap_long, REG_RFCON8, 0x10);
 	if (ret)
 		goto err_ret;
 
-	ret = write_long_reg(devrec, REG_SLPCON1, 0x21);
+	ret = regmap_write(devrec->regmap_long, REG_SLPCON1, 0x21);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_BBREG2, 0x80);
+	ret = regmap_write(devrec->regmap_short, REG_BBREG2, 0x80);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_CCAEDTH, 0x60);
+	ret = regmap_write(devrec->regmap_short, REG_CCAEDTH, 0x60);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_BBREG6, 0x40);
+	ret = regmap_write(devrec->regmap_short, REG_BBREG6, 0x40);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_RFCTL, 0x04);
+	ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x04);
 	if (ret)
 		goto err_ret;
 
-	ret = write_short_reg(devrec, REG_RFCTL, 0x0);
+	ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x0);
 	if (ret)
 		goto err_ret;
 
 	udelay(192);
 
 	/* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
-	ret = read_short_reg(devrec, REG_RXMCR, &val);
-	if (ret)
-		goto err_ret;
-
-	val &= ~0x3; /* Clear RX mode (normal) */
-
-	ret = write_short_reg(devrec, REG_RXMCR, val);
+	ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 0x03, 0x00);
 	if (ret)
 		goto err_ret;
 
@@ -696,22 +1129,39 @@
 		/* Enable external amplifier.
 		 * From MRF24J40MC datasheet section 1.3: Operation.
 		 */
-		read_long_reg(devrec, REG_TESTMODE, &val);
-		val |= 0x7; /* Configure GPIO 0-2 to control amplifier */
-		write_long_reg(devrec, REG_TESTMODE, val);
+		regmap_update_bits(devrec->regmap_long, REG_TESTMODE, 0x07,
+				   0x07);
 
-		read_short_reg(devrec, REG_TRISGPIO, &val);
-		val |= 0x8; /* Set GPIO3 as output. */
-		write_short_reg(devrec, REG_TRISGPIO, val);
+		/* Set GPIO3 as output. */
+		regmap_update_bits(devrec->regmap_short, REG_TRISGPIO, 0x08,
+				   0x08);
 
-		read_short_reg(devrec, REG_GPIO, &val);
-		val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */
-		write_short_reg(devrec, REG_GPIO, val);
+		/* Set GPIO3 HIGH to enable U5 voltage regulator */
+		regmap_update_bits(devrec->regmap_short, REG_GPIO, 0x08, 0x08);
 
 		/* Reduce TX pwr to meet FCC requirements.
 		 * From MRF24J40MC datasheet section 3.1.1
 		 */
-		write_long_reg(devrec, REG_RFCON3, 0x28);
+		regmap_write(devrec->regmap_long, REG_RFCON3, 0x28);
+	}
+
+	irq_type = irq_get_trigger_type(devrec->spi->irq);
+	if (irq_type == IRQ_TYPE_EDGE_RISING ||
+	    irq_type == IRQ_TYPE_EDGE_FALLING)
+		dev_warn(&devrec->spi->dev,
+			 "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
+	switch (irq_type) {
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		/* set interrupt polarity to rising */
+		ret = regmap_update_bits(devrec->regmap_long, REG_SLPCON0,
+					 BIT_INTEDGE, BIT_INTEDGE);
+		if (ret)
+			goto err_ret;
+		break;
+	default:
+		/* default is falling edge */
+		break;
 	}
 
 	return 0;
@@ -720,67 +1170,178 @@
 	return ret;
 }
 
+static void
+mrf24j40_setup_tx_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->tx_msg);
+	devrec->tx_msg.context = devrec;
+	devrec->tx_msg.complete = write_tx_buf_complete;
+	devrec->tx_hdr_trx.len = 2;
+	devrec->tx_hdr_trx.tx_buf = devrec->tx_hdr_buf;
+	spi_message_add_tail(&devrec->tx_hdr_trx, &devrec->tx_msg);
+	devrec->tx_len_trx.len = 2;
+	devrec->tx_len_trx.tx_buf = devrec->tx_len_buf;
+	spi_message_add_tail(&devrec->tx_len_trx, &devrec->tx_msg);
+	spi_message_add_tail(&devrec->tx_buf_trx, &devrec->tx_msg);
+
+	spi_message_init(&devrec->tx_post_msg);
+	devrec->tx_post_msg.context = devrec;
+	devrec->tx_post_trx.len = 2;
+	devrec->tx_post_trx.tx_buf = devrec->tx_post_buf;
+	spi_message_add_tail(&devrec->tx_post_trx, &devrec->tx_post_msg);
+}
+
+static void
+mrf24j40_setup_rx_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->rx_msg);
+	devrec->rx_msg.context = devrec;
+	devrec->rx_trx.len = 2;
+	devrec->rx_trx.tx_buf = devrec->rx_buf;
+	devrec->rx_trx.rx_buf = devrec->rx_buf;
+	spi_message_add_tail(&devrec->rx_trx, &devrec->rx_msg);
+
+	spi_message_init(&devrec->rx_buf_msg);
+	devrec->rx_buf_msg.context = devrec;
+	devrec->rx_buf_msg.complete = mrf24j40_handle_rx_read_buf_complete;
+	devrec->rx_addr_trx.len = 2;
+	devrec->rx_addr_trx.tx_buf = devrec->rx_addr_buf;
+	spi_message_add_tail(&devrec->rx_addr_trx, &devrec->rx_buf_msg);
+	devrec->rx_fifo_buf_trx.rx_buf = devrec->rx_fifo_buf;
+	spi_message_add_tail(&devrec->rx_fifo_buf_trx, &devrec->rx_buf_msg);
+	devrec->rx_lqi_trx.len = 2;
+	devrec->rx_lqi_trx.rx_buf = devrec->rx_lqi_buf;
+	spi_message_add_tail(&devrec->rx_lqi_trx, &devrec->rx_buf_msg);
+}
+
+static void
+mrf24j40_setup_irq_spi_messages(struct mrf24j40 *devrec)
+{
+	spi_message_init(&devrec->irq_msg);
+	devrec->irq_msg.context = devrec;
+	devrec->irq_msg.complete = mrf24j40_intstat_complete;
+	devrec->irq_trx.len = 2;
+	devrec->irq_trx.tx_buf = devrec->irq_buf;
+	devrec->irq_trx.rx_buf = devrec->irq_buf;
+	spi_message_add_tail(&devrec->irq_trx, &devrec->irq_msg);
+}
+
+static void mrf24j40_phy_setup(struct mrf24j40 *devrec)
+{
+	ieee802154_random_extended_addr(&devrec->hw->phy->perm_extended_addr);
+	devrec->hw->phy->current_channel = 11;
+
+	/* mrf24j40 supports max_minbe 0 - 3 */
+	devrec->hw->phy->supported.max_minbe = 3;
+	/* The datasheet doesn't say anything about max_be, but since we
+	 * have min_be we assume the default max_be.
+	 */
+	devrec->hw->phy->supported.min_maxbe = 5;
+	devrec->hw->phy->supported.max_maxbe = 5;
+
+	devrec->hw->phy->cca.mode = NL802154_CCA_CARRIER;
+	devrec->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+					       BIT(NL802154_CCA_CARRIER) |
+					       BIT(NL802154_CCA_ENERGY_CARRIER);
+	devrec->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND);
+
+	devrec->hw->phy->cca_ed_level = -6900;
+	devrec->hw->phy->supported.cca_ed_levels = mrf24j40_ed_levels;
+	devrec->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(mrf24j40_ed_levels);
+
+	switch (spi_get_device_id(devrec->spi)->driver_data) {
+	case MRF24J40:
+	case MRF24J40MA:
+		devrec->hw->phy->supported.tx_powers = mrf24j40ma_powers;
+		devrec->hw->phy->supported.tx_powers_size = ARRAY_SIZE(mrf24j40ma_powers);
+		devrec->hw->phy->flags |= WPAN_PHY_FLAG_TXPOWER;
+		break;
+	default:
+		break;
+	}
+}
+
 static int mrf24j40_probe(struct spi_device *spi)
 {
-	int ret = -ENOMEM;
+	int ret = -ENOMEM, irq_type;
+	struct ieee802154_hw *hw;
 	struct mrf24j40 *devrec;
 
 	dev_info(&spi->dev, "probe(). IRQ: %d\n", spi->irq);
 
-	devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL);
-	if (!devrec)
-		goto err_ret;
-	devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL);
-	if (!devrec->buf)
-		goto err_ret;
-
-	spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
-	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
-		spi->max_speed_hz = MAX_SPI_SPEED_HZ;
-
-	mutex_init(&devrec->buffer_mutex);
-	init_completion(&devrec->tx_complete);
-	devrec->spi = spi;
-	spi_set_drvdata(spi, devrec);
-
 	/* Register with the 802154 subsystem */
 
-	devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops);
-	if (!devrec->hw)
+	hw = ieee802154_alloc_hw(sizeof(*devrec), &mrf24j40_ops);
+	if (!hw)
 		goto err_ret;
 
-	devrec->hw->priv = devrec;
-	devrec->hw->parent = &devrec->spi->dev;
+	devrec = hw->priv;
+	devrec->spi = spi;
+	spi_set_drvdata(spi, devrec);
+	devrec->hw = hw;
+	devrec->hw->parent = &spi->dev;
 	devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
-	devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT;
+	devrec->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+			    IEEE802154_HW_CSMA_PARAMS |
+			    IEEE802154_HW_PROMISCUOUS;
+
+	devrec->hw->phy->flags = WPAN_PHY_FLAG_CCA_MODE |
+				 WPAN_PHY_FLAG_CCA_ED_LEVEL;
+
+	mrf24j40_setup_tx_spi_messages(devrec);
+	mrf24j40_setup_rx_spi_messages(devrec);
+	mrf24j40_setup_irq_spi_messages(devrec);
+
+	devrec->regmap_short = devm_regmap_init_spi(spi,
+						    &mrf24j40_short_regmap);
+	if (IS_ERR(devrec->regmap_short)) {
+		ret = PTR_ERR(devrec->regmap_short);
+		dev_err(&spi->dev, "Failed to allocate short register map: %d\n",
+			ret);
+		goto err_register_device;
+	}
+
+	devrec->regmap_long = devm_regmap_init(&spi->dev,
+					       &mrf24j40_long_regmap_bus,
+					       spi, &mrf24j40_long_regmap);
+	if (IS_ERR(devrec->regmap_long)) {
+		ret = PTR_ERR(devrec->regmap_long);
+		dev_err(&spi->dev, "Failed to allocate long register map: %d\n",
+			ret);
+		goto err_register_device;
+	}
+
+	if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) {
+		dev_warn(&spi->dev, "spi clock above possible maximum: %d",
+			 MAX_SPI_SPEED_HZ);
+		ret = -EINVAL;
+		goto err_register_device;
+	}
+
+	ret = mrf24j40_hw_init(devrec);
+	if (ret)
+		goto err_register_device;
+
+	mrf24j40_phy_setup(devrec);
+
+	/* request IRQF_TRIGGER_LOW as fallback default */
+	irq_type = irq_get_trigger_type(spi->irq);
+	if (!irq_type)
+		irq_type = IRQF_TRIGGER_LOW;
+
+	ret = devm_request_irq(&spi->dev, spi->irq, mrf24j40_isr,
+			       irq_type, dev_name(&spi->dev), devrec);
+	if (ret) {
+		dev_err(printdev(devrec), "Unable to get IRQ");
+		goto err_register_device;
+	}
 
 	dev_dbg(printdev(devrec), "registered mrf24j40\n");
 	ret = ieee802154_register_hw(devrec->hw);
 	if (ret)
 		goto err_register_device;
 
-	ret = mrf24j40_hw_init(devrec);
-	if (ret)
-		goto err_hw_init;
-
-	ret = devm_request_threaded_irq(&spi->dev,
-					spi->irq,
-					NULL,
-					mrf24j40_isr,
-					IRQF_TRIGGER_LOW|IRQF_ONESHOT,
-					dev_name(&spi->dev),
-					devrec);
-
-	if (ret) {
-		dev_err(printdev(devrec), "Unable to get IRQ");
-		goto err_irq;
-	}
-
 	return 0;
 
-err_irq:
-err_hw_init:
-	ieee802154_unregister_hw(devrec->hw);
 err_register_device:
 	ieee802154_free_hw(devrec->hw);
 err_ret:
@@ -801,6 +1362,14 @@
 	return 0;
 }
 
+static const struct of_device_id mrf24j40_of_match[] = {
+	{ .compatible = "microchip,mrf24j40", .data = (void *)MRF24J40 },
+	{ .compatible = "microchip,mrf24j40ma", .data = (void *)MRF24J40MA },
+	{ .compatible = "microchip,mrf24j40mc", .data = (void *)MRF24J40MC },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, mrf24j40_of_match);
+
 static const struct spi_device_id mrf24j40_ids[] = {
 	{ "mrf24j40", MRF24J40 },
 	{ "mrf24j40ma", MRF24J40MA },
@@ -811,6 +1380,7 @@
 
 static struct spi_driver mrf24j40_driver = {
 	.driver = {
+		.of_match_table = of_match_ptr(mrf24j40_of_match),
 		.name = "mrf24j40",
 		.owner = THIS_MODULE,
 	},
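
The mrf24j40 conversion above leans on two regmap properties. First, the
SPI command encoding falls out of the regmap_config fields: with
reg_bits = 7 and pad_bits = 1 the register lands in the upper seven bits
of the command byte, and write_flag_mask = 0x01 sets the write bit,
reproducing the driver's own MRF24J40_READSHORT()/MRF24J40_WRITESHORT()
macros. Second, explicit read-modify-write pairs collapse into
regmap_update_bits(). A minimal sketch under those assumptions (the
function name is hypothetical, not part of the patch):

/* Illustrative sketch only, not part of the patch.
 *
 * Short address space command byte, derived from the regmap_config:
 *   read  REG_INTSTAT (0x31):  0x31 << 1       == 0x62
 *   write REG_INTCON  (0x32): (0x32 << 1) | 1  == 0x65
 *
 * Long accesses use reg_bits = 11, pad_bits = 5 (i.e. reg << 5) with
 * bit 15 as the access flag; the custom bus write callback additionally
 * sets bit 12, matching MRF24J40_WRITELONG().
 */
static int example_enable_irqs(struct regmap *regmap_short)
{
	/* One locked, cache-aware read-modify-write replaces the old
	 * read_short_reg()/write_short_reg() pair.
	 */
	return regmap_update_bits(regmap_short, REG_INTCON,
				  BIT_TXNIE | BIT_RXIE, 0);
}
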
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 207f62e..d50887e 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -344,17 +344,18 @@
 {
 	const struct iphdr *ip4h = ip_hdr(skb);
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 	struct rtable *rt;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi4 fl4 = {
-		.flowi4_oif = dev_get_iflink(dev),
+		.flowi4_oif = dev->ifindex,
 		.flowi4_tos = RT_TOS(ip4h->tos),
 		.flowi4_flags = FLOWI_FLAG_ANYSRC,
 		.daddr = ip4h->daddr,
 		.saddr = ip4h->saddr,
 	};
 
-	rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
+	rt = ip_route_output_flow(net, &fl4, NULL);
 	if (IS_ERR(rt))
 		goto err;
 
@@ -364,7 +365,7 @@
 	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->dst);
-	err = ip_local_out(skb);
+	err = ip_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
 	else
@@ -381,10 +382,11 @@
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 	struct dst_entry *dst;
 	int err, ret = NET_XMIT_DROP;
 	struct flowi6 fl6 = {
-		.flowi6_iif = skb->dev->ifindex,
+		.flowi6_iif = dev->ifindex,
 		.daddr = ip6h->daddr,
 		.saddr = ip6h->saddr,
 		.flowi6_flags = FLOWI_FLAG_ANYSRC,
@@ -393,7 +395,7 @@
 		.flowi6_proto = ip6h->nexthdr,
 	};
 
-	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
+	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
 		ret = dst->error;
 		dst_release(dst);
@@ -401,7 +403,7 @@
 	}
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
-	err = ip6_local_out(skb);
+	err = ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		dev->stats.tx_errors++;
 	else
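
The ipvlan hunks above track the kernel-wide rework that threads
struct net and the originating socket through the output path, so
ip_local_out() and ip6_local_out() now take (net, sk, skb). A hedged
sketch of the resulting IPv4 call shape (the function name is
illustrative, not from the patch):

/* Illustrative sketch only: the post-change IPv4 output call. */
static int example_xmit_v4(struct sk_buff *skb, struct rtable *rt)
{
	struct net *net = dev_net(skb->dev);	/* resolved once, reused */

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* was: err = ip_local_out(skb); */
	return ip_local_out(net, skb->sk, skb);
}
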
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 47da435..86f6c62 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -412,7 +412,7 @@
 
 	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
-		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
 		if (!skb)
 			return RX_HANDLER_CONSUMED;
 		eth = eth_hdr(skb);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 248478c..197c939 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -137,7 +137,7 @@
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
 		      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
 
 static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
 {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index c5ad98a..60994a8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -69,20 +69,39 @@
 	---help---
 	  Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
 
+config BCM_NET_PHYLIB
+	tristate
+
 config BROADCOM_PHY
 	tristate "Drivers for Broadcom PHYs"
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
 	  BCM5481 and BCM5482 PHYs.
 
+config BCM_CYGNUS_PHY
+	tristate "Drivers for Broadcom Cygnus SoC internal PHY"
+	depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+	depends on MDIO_BCM_IPROC
+	select BCM_NET_PHYLIB
+	---help---
+	  This PHY driver is for the 1G internal PHYs of the Broadcom
+	  Cygnus family of SoCs.
+
+	  Currently supports the internal PHYs used in the BCM11300,
+	  BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
+	  BCM58303 and BCM58305 Broadcom Cygnus SoCs.
+
 config BCM63XX_PHY
 	tristate "Drivers for Broadcom 63xx SOCs internal PHY"
 	depends on BCM63XX
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the 6348 and 6358 PHYs.
 
 config BCM7XXX_PHY
 	tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+	select BCM_NET_PHYLIB
 	---help---
 	  Currently supports the BCM7366, BCM7439, BCM7445, and
 	  40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
@@ -122,6 +141,11 @@
 	---help---
 	  Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
+config DP83848_PHY
+	tristate "Driver for Texas Instruments DP83848 PHY"
+	---help---
+	  Supports the DP83848 PHY.
+
 config DP83867_PHY
 	tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
 	---help---
@@ -168,8 +192,6 @@
 	  busses. It is required by the Octeon and ThunderX ethernet device
 	  drivers.
 
-	  If in doubt, say Y.
-
 config MDIO_SUN4I
 	tristate "Allwinner sun4i MDIO interface support"
 	depends on ARCH_SUNXI
@@ -225,6 +247,15 @@
 	  This hardware can be found in the Broadcom GENET Ethernet MAC
 	  controllers as well as some Broadcom Ethernet switches such as the
 	  Starfighter 2 switches.
+
+config MDIO_BCM_IPROC
+	tristate "Broadcom iProc MDIO bus controller"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
+	depends on HAS_IOMEM && OF_MDIO
+	help
+	  This module provides a driver for the MDIO busses found in the
+	  Broadcom iProc SoCs.
+
 endif # PHYLIB
 
 config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 87f079c..f31a4e2 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,10 +12,12 @@
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
 obj-$(CONFIG_TERANETICS_PHY)	+= teranetics.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
+obj-$(CONFIG_BCM_NET_PHYLIB)	+= bcm-phy-lib.o
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)	+= bcm63xx.o
 obj-$(CONFIG_BCM7XXX_PHY)	+= bcm7xxx.o
 obj-$(CONFIG_BCM87XX_PHY)	+= bcm87xx.o
+obj-$(CONFIG_BCM_CYGNUS_PHY)	+= bcm-cygnus.o
 obj-$(CONFIG_ICPLUS_PHY)	+= icplus.o
 obj-$(CONFIG_REALTEK_PHY)	+= realtek.o
 obj-$(CONFIG_LSI_ET1011C_PHY)	+= et1011c.o
@@ -24,6 +26,7 @@
 obj-$(CONFIG_MDIO_GPIO)		+= mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)	+= national.o
 obj-$(CONFIG_DP83640_PHY)	+= dp83640.o
+obj-$(CONFIG_DP83848_PHY)	+= dp83848.o
 obj-$(CONFIG_DP83867_PHY)	+= dp83867.o
 obj-$(CONFIG_STE10XP)		+= ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)	+= micrel.o
@@ -38,3 +41,4 @@
 obj-$(CONFIG_MDIO_MOXART)	+= mdio-moxart.o
 obj-$(CONFIG_MDIO_BCM_UNIMAC)	+= mdio-bcm-unimac.o
 obj-$(CONFIG_MICROCHIP_PHY)	+= microchip.o
+obj-$(CONFIG_MDIO_BCM_IPROC)	+= mdio-bcm-iproc.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index d6111af..f1936b7 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -171,20 +171,7 @@
 },
 };
 
-static int __init aquantia_init(void)
-{
-	return phy_drivers_register(aquantia_driver,
-				    ARRAY_SIZE(aquantia_driver));
-}
-
-static void __exit aquantia_exit(void)
-{
-	return phy_drivers_unregister(aquantia_driver,
-				      ARRAY_SIZE(aquantia_driver));
-}
-
-module_init(aquantia_init);
-module_exit(aquantia_exit);
+module_phy_driver(aquantia_driver);
 
 static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
 	{ PHY_ID_AQ1202, 0xfffffff0 },
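
module_phy_driver() replaces the init/exit boilerplate deleted above.
For reference, the macro expands to roughly the following (sketched from
the pattern it replaces; see include/linux/phy.h for the authoritative
definition):

/* Approximate expansion of module_phy_driver(aquantia_driver): */
static int __init phy_module_init(void)
{
	return phy_drivers_register(aquantia_driver,
				    ARRAY_SIZE(aquantia_driver));
}
module_init(phy_module_init);

static void __exit phy_module_exit(void)
{
	phy_drivers_unregister(aquantia_driver,
			       ARRAY_SIZE(aquantia_driver));
}
module_exit(phy_module_exit);
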
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
new file mode 100644
index 0000000..49bbc68
--- /dev/null
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Broadcom Cygnus SoC internal transceivers support. */
+#include "bcm-phy-lib.h"
+#include <linux/brcmphy.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+/* Broadcom Cygnus Phy specific registers */
+#define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0  0x91E5 /* VDAL Control register */
+
+static int bcm_cygnus_afe_config(struct phy_device *phydev)
+{
+	int rc;
+
+	/* ensure smdspclk is enabled */
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, 0x0c30);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_VDAC_ICTRL_0 bit 7:4 Iq=1100 for 1g 10bt, normal modes */
+	rc = bcm_phy_write_misc(phydev, 0x39, 0x01, 0xA7C8);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_HPF_TRIM_OTHERS bit11=1, short cascode enable for all modes*/
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x00, 0x0803);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_TX_CONFIG_1 bit 7:4 Iq=1100 for test modes */
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x01, 0xA740);
+	if (rc < 0)
+		return rc;
+
+	/* AFE TEMPSEN_OTHERS rcal_HT, rcal_LT 10000 */
+	rc = bcm_phy_write_misc(phydev, 0x3A, 0x03, 0x8400);
+	if (rc < 0)
+		return rc;
+
+	/* AFE_FUTURE_RSV bit 2:0 rccal <2:0>=100 */
+	rc = bcm_phy_write_misc(phydev, 0x3B, 0x00, 0x0004);
+	if (rc < 0)
+		return rc;
+
+	/* Adjust bias current trim to overcome digital offset */
+	rc = phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x02);
+	if (rc < 0)
+		return rc;
+
+	/* make rcal=100, since rdb default is 000 */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+	if (rc < 0)
+		return rc;
+
+	/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+	if (rc < 0)
+		return rc;
+
+	/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
+	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+
+	return rc;
+}
+
+static int bcm_cygnus_config_init(struct phy_device *phydev)
+{
+	int reg, rc;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	/* Mask interrupts globally. */
+	reg |= MII_BCM54XX_ECR_IM;
+	rc = phy_write(phydev, MII_BCM54XX_ECR, reg);
+	if (rc)
+		return rc;
+
+	/* Unmask events of interest */
+	reg = ~(MII_BCM54XX_INT_DUPLEX |
+		MII_BCM54XX_INT_SPEED |
+		MII_BCM54XX_INT_LINK);
+	rc = phy_write(phydev, MII_BCM54XX_IMR, reg);
+	if (rc)
+		return rc;
+
+	/* Apply AFE settings for the PHY */
+	rc = bcm_cygnus_afe_config(phydev);
+	if (rc)
+		return rc;
+
+	/* Advertise EEE */
+	rc = bcm_phy_enable_eee(phydev);
+	if (rc)
+		return rc;
+
+	/* Enable APD */
+	return bcm_phy_enable_apd(phydev, false);
+}
+
+static int bcm_cygnus_resume(struct phy_device *phydev)
+{
+	int rc;
+
+	genphy_resume(phydev);
+
+	/* Re-initialize the PHY to apply AFE work-arounds and
+	 * configurations when coming out of suspend.
+	 */
+	rc = bcm_cygnus_config_init(phydev);
+	if (rc)
+		return rc;
+
+	/* restart auto negotiation with the new settings */
+	return genphy_config_aneg(phydev);
+}
+
+static struct phy_driver bcm_cygnus_phy_driver[] = {
+{
+	.phy_id        = PHY_ID_BCM_CYGNUS,
+	.phy_id_mask   = 0xfffffff0,
+	.name          = "Broadcom Cygnus PHY",
+	.features      = PHY_GBIT_FEATURES |
+			SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+	.config_init   = bcm_cygnus_config_init,
+	.config_aneg   = genphy_config_aneg,
+	.read_status   = genphy_read_status,
+	.ack_interrupt = bcm_phy_ack_intr,
+	.config_intr   = bcm_phy_config_intr,
+	.suspend       = genphy_suspend,
+	.resume        = bcm_cygnus_resume,
+} };
+
+static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
+	{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
+	{ }
+};
+MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl);
+
+module_phy_driver(bcm_cygnus_phy_driver);
+
+MODULE_DESCRIPTION("Broadcom Cygnus internal PHY driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
new file mode 100644
index 0000000..ddb377e
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "bcm-phy-lib.h"
+#include <linux/brcmphy.h>
+#include <linux/export.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define MII_BCM_CHANNEL_WIDTH     0x2000
+#define BCM_CL45VEN_EEE_ADV       0x3c
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
+{
+	int rc;
+
+	rc = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+	if (rc < 0)
+		return rc;
+
+	return phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_exp);
+
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
+{
+	int val;
+
+	val = phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
+	if (val < 0)
+		return val;
+
+	val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
+
+	/* Restore default value.  It's O.K. if this write fails. */
+	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_exp);
+
+int bcm_phy_write_misc(struct phy_device *phydev,
+		       u16 reg, u16 chl, u16 val)
+{
+	int rc;
+	int tmp;
+
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
+		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+	if (rc < 0)
+		return rc;
+
+	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+	if (rc < 0)
+		return rc;
+
+	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
+	rc = bcm_phy_write_exp(phydev, tmp, val);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_misc);
+
+int bcm_phy_read_misc(struct phy_device *phydev,
+		      u16 reg, u16 chl)
+{
+	int rc;
+	int tmp;
+
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
+		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+	if (rc < 0)
+		return rc;
+
+	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+	if (rc < 0)
+		return rc;
+
+	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
+	rc = bcm_phy_read_exp(phydev, tmp);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_misc);
+
+int bcm_phy_ack_intr(struct phy_device *phydev)
+{
+	int reg;
+
+	/* Clear pending interrupts.  */
+	reg = phy_read(phydev, MII_BCM54XX_ISR);
+	if (reg < 0)
+		return reg;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);
+
+int bcm_phy_config_intr(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read(phydev, MII_BCM54XX_ECR);
+	if (reg < 0)
+		return reg;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		reg &= ~MII_BCM54XX_ECR_IM;
+	else
+		reg |= MII_BCM54XX_ECR_IM;
+
+	return phy_write(phydev, MII_BCM54XX_ECR, reg);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_config_intr);
+
+int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
+{
+	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
+	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
+}
+EXPORT_SYMBOL_GPL(bcm_phy_read_shadow);
+
+int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
+			 u16 val)
+{
+	return phy_write(phydev, MII_BCM54XX_SHD,
+			 MII_BCM54XX_SHD_WRITE |
+			 MII_BCM54XX_SHD_VAL(shadow) |
+			 MII_BCM54XX_SHD_DATA(val));
+}
+EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);
+
+int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
+{
+	int val;
+
+	if (dll_pwr_down) {
+		val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
+		if (val < 0)
+			return val;
+
+		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
+	}
+
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
+	if (val < 0)
+		return val;
+
+	/* Clear APD bits */
+	val &= BCM_APD_CLR_MASK;
+
+	if (phydev->autoneg == AUTONEG_ENABLE)
+		val |= BCM54XX_SHD_APD_EN;
+	else
+		val |= BCM_NO_ANEG_APD_EN;
+
+	/* Enable energy detect single link pulse for easy wakeup */
+	val |= BCM_APD_SINGLELP_EN;
+
+	/* Enable Auto Power-Down (APD) for the PHY */
+	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
+
+int bcm_phy_enable_eee(struct phy_device *phydev)
+{
+	int val;
+
+	/* Enable EEE at PHY level */
+	val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
+				    MDIO_MMD_AN, phydev->addr);
+	if (val < 0)
+		return val;
+
+	val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
+
+	phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
+			       MDIO_MMD_AN,  phydev->addr, (u32)val);
+
+	/* Advertise EEE */
+	val = phy_read_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
+				    MDIO_MMD_AN, phydev->addr);
+	if (val < 0)
+		return val;
+
+	val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
+
+	phy_write_mmd_indirect(phydev, BCM_CL45VEN_EEE_ADV,
+			       MDIO_MMD_AN,  phydev->addr, (u32)val);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_enable_eee);
+
+MODULE_DESCRIPTION("Broadcom PHY Library");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
new file mode 100644
index 0000000..b2091c8
--- /dev/null
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BCM_PHY_LIB_H
+#define _LINUX_BCM_PHY_LIB_H
+
+#include <linux/phy.h>
+
+int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+
+int bcm_phy_write_misc(struct phy_device *phydev,
+		       u16 reg, u16 chl, u16 value);
+int bcm_phy_read_misc(struct phy_device *phydev,
+		      u16 reg, u16 chl);
+
+int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
+			 u16 val);
+int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow);
+
+int bcm_phy_ack_intr(struct phy_device *phydev);
+int bcm_phy_config_intr(struct phy_device *phydev);
+
+int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
+
+int bcm_phy_enable_eee(struct phy_device *phydev);
+#endif /* _LINUX_BCM_PHY_LIB_H */
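
The new library collects the accessors shared by the Broadcom drivers touched below. As a rough sketch of how a PHY driver is expected to combine them (the driver name, the expansion register and the value are invented for illustration, not taken from any driver in this series):

#include "bcm-phy-lib.h"
#include <linux/phy.h>

/* Hypothetical config_init for a BCM54xx-style PHY built on the shared
 * library: program one expansion register, then enable APD and EEE.
 */
static int foo_phy_config_init(struct phy_device *phydev)
{
	int ret;

	/* 0x00b0 and 0x0000 are placeholder register/value pairs */
	ret = bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
	if (ret < 0)
		return ret;

	ret = bcm_phy_enable_apd(phydev, false);
	if (ret < 0)
		return ret;

	return bcm_phy_enable_eee(phydev);
}
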
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 830ec31..86b2805 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -6,6 +6,7 @@
  *	as published by the Free Software Foundation; either version
  *	2 of the License, or (at your option) any later version.
  */
+#include "bcm-phy-lib.h"
 #include <linux/module.h>
 #include <linux/phy.h>
 
@@ -42,35 +43,6 @@
 	return phy_write(phydev, MII_BCM63XX_IR, reg);
 }
 
-static int bcm63xx_ack_interrupt(struct phy_device *phydev)
-{
-	int reg;
-
-	/* Clear pending interrupts.  */
-	reg = phy_read(phydev, MII_BCM63XX_IR);
-	if (reg < 0)
-		return reg;
-
-	return 0;
-}
-
-static int bcm63xx_config_intr(struct phy_device *phydev)
-{
-	int reg, err;
-
-	reg = phy_read(phydev, MII_BCM63XX_IR);
-	if (reg < 0)
-		return reg;
-
-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		reg &= ~MII_BCM63XX_IR_GMASK;
-	else
-		reg |= MII_BCM63XX_IR_GMASK;
-
-	err = phy_write(phydev, MII_BCM63XX_IR, reg);
-	return err;
-}
-
 static struct phy_driver bcm63xx_driver[] = {
 {
 	.phy_id		= 0x00406000,
@@ -82,8 +54,8 @@
 	.config_init	= bcm63xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm63xx_ack_interrupt,
-	.config_intr	= bcm63xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	/* same phy as above, with just a different OUI */
@@ -95,8 +67,8 @@
 	.config_init	= bcm63xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm63xx_ack_interrupt,
-	.config_intr	= bcm63xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 } };
 
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 6b701b3..03d4809 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -12,12 +12,12 @@
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/delay.h>
+#include "bcm-phy-lib.h"
 #include <linux/bitops.h>
 #include <linux/brcmphy.h>
 #include <linux/mdio.h>
 
 /* Broadcom BCM7xxx internal PHY registers */
-#define MII_BCM7XXX_CHANNEL_WIDTH	0x2000
 
 /* 40nm only register definitions */
 #define MII_BCM7XXX_100TX_AUX_CTL	0x10
@@ -25,7 +25,6 @@
 #define MII_BCM7XXX_100TX_DISC		0x14
 #define MII_BCM7XXX_AUX_MODE		0x1d
 #define  MII_BCM7XX_64CLK_MDIO		BIT(12)
-#define MII_BCM7XXX_CORE_BASE1E		0x1e
 #define MII_BCM7XXX_TEST		0x1f
 #define  MII_BCM7XXX_SHD_MODE_2		BIT(2)
 
@@ -46,39 +45,13 @@
 #define AFE_VDAC_OTHERS_0		MISC_ADDR(0x39, 3)
 #define AFE_HPF_TRIM_OTHERS		MISC_ADDR(0x3a, 0)
 
-#define CORE_EXPB0			0xb0
-
-static void phy_write_exp(struct phy_device *phydev,
-					u16 reg, u16 value)
-{
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
-	phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
-}
-
-static void phy_write_misc(struct phy_device *phydev,
-					u16 reg, u16 chl, u16 value)
-{
-	int tmp;
-
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
-
-	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
-	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
-	phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
-
-	tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
-
-	phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
-}
-
 static void r_rc_cal_reset(struct phy_device *phydev)
 {
 	/* Reset R_CAL/RC_CAL Engine */
-	phy_write_exp(phydev, 0x00b0, 0x0010);
+	bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
 
 	/* Disable Reset R_AL/RC_CAL Engine */
-	phy_write_exp(phydev, 0x00b0, 0x0000);
+	bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
 }
 
 static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
@@ -86,38 +59,38 @@
 	/* Increase VCO range to prevent unlocking problem of PLL at low
 	 * temp
 	 */
-	phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
 
 	/* Change Ki to 011 */
-	phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
 
 	/* Disable loading of TVCO buffer to bandgap, set bandgap trim
 	 * to 111
 	 */
-	phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+	bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
 
 	/* Adjust bias current trim by -3 */
-	phy_write_misc(phydev, DSP_TAP10, 0x690b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
 
 	/* Switch to CORE_BASE1E */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
 
 	r_rc_cal_reset(phydev);
 
 	/* write AFE_RXCONFIG_0 */
-	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
 
 	/* write AFE_RXCONFIG_1 */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
 
 	/* write AFE_RX_LP_COUNTER */
-	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
 	/* write AFE_HPF_TRIM_OTHERS */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
 
 	/* write AFE_TX_CONFIG */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
 
 	return 0;
 }
@@ -125,36 +98,36 @@
 static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
 {
 	/* AFE_RXCONFIG_0 */
-	phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb15);
 
 	/* AFE_RXCONFIG_1 */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
 	/* AFE_RXCONFIG_2, set rCal offset for HT=0 code and LT=-2 code */
-	phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0x2003);
 
 	/* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
-	phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
 	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
-	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+	bcm_phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
 	/* AFE_VDAC_OTHERS_0, set 1000BT Cidac=010 for all ports */
-	phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
+	bcm_phy_write_misc(phydev, AFE_VDAC_OTHERS_0, 0xa020);
 
 	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
 	 * offset for HT=0 code
 	 */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
 
 	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x0010);
 
 	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
-	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
 
 	/* Reset R_CAL/RC_CAL engine */
 	r_rc_cal_reset(phydev);
@@ -165,24 +138,24 @@
 static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
 {
 	/* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
-	phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
+	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
 	/* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
-	phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
 	/* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
-	phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
+	bcm_phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
 	/* AFE_HPF_TRIM_OTHERS, set 100Tx/10BT to -4.5% swing and set rCal
 	 * offset for HT=0 code
 	 */
-	phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
+	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x00e3);
 
 	/* CORE_BASE1E, force trim to overwrite and set I_ext trim to 0000 */
-	phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0x0010);
+	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0x0010);
 
 	/* DSP_TAP10, adjust bias current trim (+0% swing, +0 tick) */
-	phy_write_misc(phydev, DSP_TAP10, 0x011b);
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
 
 	/* Reset R_CAL/RC_CAL engine */
 	r_rc_cal_reset(phydev);
@@ -190,53 +163,6 @@
 	return 0;
 }
 
-static int bcm7xxx_apd_enable(struct phy_device *phydev)
-{
-	int val;
-
-	/* Enable powering down of the DLL during auto-power down */
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
-	if (val < 0)
-		return val;
-
-	val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
-	bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
-
-	/* Enable auto-power down */
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
-	if (val < 0)
-		return val;
-
-	val |= BCM54XX_SHD_APD_EN;
-	return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
-}
-
-static int bcm7xxx_eee_enable(struct phy_device *phydev)
-{
-	int val;
-
-	val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-				    MDIO_MMD_AN, phydev->addr);
-	if (val < 0)
-		return val;
-
-	/* Enable general EEE feature at the PHY level */
-	val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
-
-	phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL,
-			       MDIO_MMD_AN, phydev->addr, val);
-
-	/* Advertise supported modes */
-	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
-				    MDIO_MMD_AN, phydev->addr);
-
-	val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
-	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
-			       MDIO_MMD_AN, phydev->addr, val);
-
-	return 0;
-}
-
 static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 {
 	u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
@@ -273,11 +199,11 @@
 	if (ret)
 		return ret;
 
-	ret = bcm7xxx_eee_enable(phydev);
+	ret = bcm_phy_enable_eee(phydev);
 	if (ret)
 		return ret;
 
-	return bcm7xxx_apd_enable(phydev);
+	return bcm_phy_enable_apd(phydev, true);
 }
 
 static int bcm7xxx_28nm_resume(struct phy_device *phydev)
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9c71295..07a6119 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -14,6 +14,7 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#include "bcm-phy-lib.h"
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
@@ -29,39 +30,6 @@
 MODULE_AUTHOR("Maciej W. Rozycki");
 MODULE_LICENSE("GPL");
 
-/* Indirect register access functions for the Expansion Registers */
-static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum)
-{
-	int val;
-
-	val = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
-	if (val < 0)
-		return val;
-
-	val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
-
-	/* Restore default value.  It's O.K. if this write fails. */
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
-
-	return val;
-}
-
-static int bcm54xx_exp_write(struct phy_device *phydev, u16 regnum, u16 val)
-{
-	int ret;
-
-	ret = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
-	if (ret < 0)
-		return ret;
-
-	ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
-
-	/* Restore default value.  It's O.K. if this write fails. */
-	phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
-
-	return ret;
-}
-
 static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
 {
 	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
@@ -72,28 +40,28 @@
 {
 	int err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_AADJ1CH0,
 				MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
 				MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
-					MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_AADJ1CH3,
+				MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP75,
 				MII_BCM54XX_EXP_EXP75_VDACCTRL);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP96,
 				MII_BCM54XX_EXP_EXP96_MYST);
 	if (err < 0)
 		return err;
 
-	err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
+	err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP97,
 				MII_BCM54XX_EXP_EXP97_MYST);
 
 	return err;
@@ -114,7 +82,7 @@
 	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
 	    BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
 		/* Clear bit 9 to fix a phy interop issue. */
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
+		err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08,
 					MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
 		if (err < 0)
 			goto error;
@@ -129,12 +97,12 @@
 	if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
 		int val;
 
-		val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
+		val = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP75);
 		if (val < 0)
 			goto error;
 
 		val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
-		err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
+		err = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP75, val);
 	}
 
 error:
@@ -159,7 +127,7 @@
 	    BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
 		return;
 
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
 	if (val < 0)
 		return;
 
@@ -190,9 +158,9 @@
 		val |= BCM54XX_SHD_SCR3_TRDDAPD;
 
 	if (orig != val)
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
 
-	val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
+	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
 	if (val < 0)
 		return;
 
@@ -204,7 +172,7 @@
 		val &= ~BCM54XX_SHD_APD_EN;
 
 	if (orig != val)
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
 }
 
 static int bcm54xx_config_init(struct phy_device *phydev)
@@ -232,7 +200,7 @@
 	if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
 	     BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
 	    (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
-		bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
+		bcm_phy_write_shadow(phydev, BCM54XX_SHD_RGMII_MODE, 0);
 
 	if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
 	    (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
@@ -254,8 +222,8 @@
 		/*
 		 * Enable secondary SerDes and its use as an LED source
 		 */
-		reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_SSD);
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_SSD,
+		reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_SSD);
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_SSD,
 				     reg |
 				     BCM5482_SHD_SSD_LEDM |
 				     BCM5482_SHD_SSD_EN);
@@ -264,10 +232,10 @@
 		 * Enable SGMII slave mode and auto-detection
 		 */
 		reg = BCM5482_SSD_SGMII_SLAVE | MII_BCM54XX_EXP_SEL_SSD;
-		err = bcm54xx_exp_read(phydev, reg);
+		err = bcm_phy_read_exp(phydev, reg);
 		if (err < 0)
 			return err;
-		err = bcm54xx_exp_write(phydev, reg, err |
+		err = bcm_phy_write_exp(phydev, reg, err |
 					BCM5482_SSD_SGMII_SLAVE_EN |
 					BCM5482_SSD_SGMII_SLAVE_AD);
 		if (err < 0)
@@ -277,10 +245,10 @@
 		 * Disable secondary SerDes powerdown
 		 */
 		reg = BCM5482_SSD_1000BX_CTL | MII_BCM54XX_EXP_SEL_SSD;
-		err = bcm54xx_exp_read(phydev, reg);
+		err = bcm_phy_read_exp(phydev, reg);
 		if (err < 0)
 			return err;
-		err = bcm54xx_exp_write(phydev, reg,
+		err = bcm_phy_write_exp(phydev, reg,
 					err & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
 		if (err < 0)
 			return err;
@@ -288,15 +256,15 @@
 		/*
 		 * Select 1000BASE-X register set (primary SerDes)
 		 */
-		reg = bcm54xx_shadow_read(phydev, BCM5482_SHD_MODE);
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_MODE,
+		reg = bcm_phy_read_shadow(phydev, BCM5482_SHD_MODE);
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_MODE,
 				     reg | BCM5482_SHD_MODE_1000BX);
 
 		/*
 		 * LED1=ACTIVITYLED, LED3=LINKSPD[2]
 		 * (Use LED1 as secondary SerDes ACTIVITY LED)
 		 */
-		bcm54xx_shadow_write(phydev, BCM5482_SHD_LEDS1,
+		bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1,
 			BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_ACTIVITYLED) |
 			BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_LINKSPD2));
 
@@ -334,35 +302,6 @@
 	return err;
 }
 
-static int bcm54xx_ack_interrupt(struct phy_device *phydev)
-{
-	int reg;
-
-	/* Clear pending interrupts.  */
-	reg = phy_read(phydev, MII_BCM54XX_ISR);
-	if (reg < 0)
-		return reg;
-
-	return 0;
-}
-
-static int bcm54xx_config_intr(struct phy_device *phydev)
-{
-	int reg, err;
-
-	reg = phy_read(phydev, MII_BCM54XX_ECR);
-	if (reg < 0)
-		return reg;
-
-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		reg &= ~MII_BCM54XX_ECR_IM;
-	else
-		reg |= MII_BCM54XX_ECR_IM;
-
-	err = phy_write(phydev, MII_BCM54XX_ECR, reg);
-	return err;
-}
-
 static int bcm5481_config_aneg(struct phy_device *phydev)
 {
 	int ret;
@@ -519,8 +458,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5421,
@@ -532,8 +471,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5461,
@@ -545,8 +484,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM54616S,
@@ -558,8 +497,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5464,
@@ -571,8 +510,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5481,
@@ -584,8 +523,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= bcm5481_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM5482,
@@ -597,8 +536,8 @@
 	.config_init	= bcm5482_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= bcm5482_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM50610,
@@ -610,8 +549,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM50610M,
@@ -623,8 +562,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCM57780,
@@ -636,8 +575,8 @@
 	.config_init	= bcm54xx_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.ack_interrupt	= bcm54xx_ack_interrupt,
-	.config_intr	= bcm54xx_config_intr,
+	.ack_interrupt	= bcm_phy_ack_intr,
+	.config_intr	= bcm_phy_config_intr,
 	.driver		= { .owner = THIS_MODULE },
 }, {
 	.phy_id		= PHY_ID_BCMAC131,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
new file mode 100644
index 0000000..5ce9bef
--- /dev/null
+++ b/drivers/net/phy/dp83848.c
@@ -0,0 +1,99 @@
+/*
+ * Driver for the Texas Instruments DP83848 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define DP83848_PHY_ID			0x20005c90
+
+/* Registers */
+#define DP83848_MICR			0x11
+#define DP83848_MISR			0x12
+
+/* MICR Register Fields */
+#define DP83848_MICR_INT_OE		BIT(0) /* Interrupt Output Enable */
+#define DP83848_MICR_INTEN		BIT(1) /* Interrupt Enable */
+
+/* MISR Register Fields */
+#define DP83848_MISR_RHF_INT_EN		BIT(0) /* Receive Error Counter */
+#define DP83848_MISR_FHF_INT_EN		BIT(1) /* False Carrier Counter */
+#define DP83848_MISR_ANC_INT_EN		BIT(2) /* Auto-negotiation complete */
+#define DP83848_MISR_DUP_INT_EN		BIT(3) /* Duplex Status */
+#define DP83848_MISR_SPD_INT_EN		BIT(4) /* Speed status */
+#define DP83848_MISR_LINK_INT_EN	BIT(5) /* Link status */
+#define DP83848_MISR_ED_INT_EN		BIT(6) /* Energy detect */
+#define DP83848_MISR_LQM_INT_EN		BIT(7) /* Link Quality Monitor */
+
+static int dp83848_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, DP83848_MISR);
+
+	return err < 0 ? err : 0;
+}
+
+static int dp83848_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		err = phy_write(phydev, DP83848_MICR,
+				DP83848_MICR_INT_OE |
+				DP83848_MICR_INTEN);
+		if (err < 0)
+			return err;
+
+		return phy_write(phydev, DP83848_MISR,
+				 DP83848_MISR_ANC_INT_EN |
+				 DP83848_MISR_DUP_INT_EN |
+				 DP83848_MISR_SPD_INT_EN |
+				 DP83848_MISR_LINK_INT_EN);
+	}
+
+	return phy_write(phydev, DP83848_MICR, 0x0);
+}
+
+static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+	{ DP83848_PHY_ID, 0xfffffff0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+
+static struct phy_driver dp83848_driver[] = {
+	{
+		.phy_id		= DP83848_PHY_ID,
+		.phy_id_mask	= 0xfffffff0,
+		.name		= "TI DP83848",
+		.features	= PHY_BASIC_FEATURES,
+		.flags		= PHY_HAS_INTERRUPT,
+
+		.soft_reset	= genphy_soft_reset,
+		.config_init	= genphy_config_init,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.config_aneg	= genphy_config_aneg,
+		.read_status	= genphy_read_status,
+
+		/* IRQ related */
+		.ack_interrupt	= dp83848_ack_interrupt,
+		.config_intr	= dp83848_config_intr,
+
+		.driver		= { .owner = THIS_MODULE, },
+	},
+};
+module_phy_driver(dp83848_driver);
+
+MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
new file mode 100644
index 0000000..c0b4e65
--- /dev/null
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+
+#define IPROC_GPHY_MDCDIV    0x1a
+
+#define MII_CTRL_OFFSET      0x000
+
+#define MII_CTRL_DIV_SHIFT   0
+#define MII_CTRL_PRE_SHIFT   7
+#define MII_CTRL_BUSY_SHIFT  8
+
+#define MII_DATA_OFFSET      0x004
+#define MII_DATA_MASK        0xffff
+#define MII_DATA_TA_SHIFT    16
+#define MII_DATA_TA_VAL      2
+#define MII_DATA_RA_SHIFT    18
+#define MII_DATA_PA_SHIFT    23
+#define MII_DATA_OP_SHIFT    28
+#define MII_DATA_OP_WRITE    1
+#define MII_DATA_OP_READ     2
+#define MII_DATA_SB_SHIFT    30
+
+struct iproc_mdio_priv {
+	struct mii_bus *mii_bus;
+	void __iomem *base;
+};
+
+static inline int iproc_mdio_wait_for_idle(void __iomem *base)
+{
+	u32 val;
+	unsigned int timeout = 1000; /* poll for at most 1-2s */
+
+	do {
+		val = readl(base + MII_CTRL_OFFSET);
+		if ((val & BIT(MII_CTRL_BUSY_SHIFT)) == 0)
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+static inline void iproc_mdio_config_clk(void __iomem *base)
+{
+	u32 val;
+
+	val = (IPROC_GPHY_MDCDIV << MII_CTRL_DIV_SHIFT) |
+		  BIT(MII_CTRL_PRE_SHIFT);
+	writel(val, base + MII_CTRL_OFFSET);
+}
+
+static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+	struct iproc_mdio_priv *priv = bus->priv;
+	u32 cmd;
+	int rc;
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	iproc_mdio_config_clk(priv->base);
+
+	/* Prepare the read operation */
+	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
+		(reg << MII_DATA_RA_SHIFT) |
+		(phy_id << MII_DATA_PA_SHIFT) |
+		BIT(MII_DATA_SB_SHIFT) |
+		(MII_DATA_OP_READ << MII_DATA_OP_SHIFT);
+
+	writel(cmd, priv->base + MII_DATA_OFFSET);
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	cmd = readl(priv->base + MII_DATA_OFFSET) & MII_DATA_MASK;
+
+	return cmd;
+}
+
+static int iproc_mdio_write(struct mii_bus *bus, int phy_id,
+			    int reg, u16 val)
+{
+	struct iproc_mdio_priv *priv = bus->priv;
+	u32 cmd;
+	int rc;
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	iproc_mdio_config_clk(priv->base);
+
+	/* Prepare the write operation */
+	cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
+		(reg << MII_DATA_RA_SHIFT) |
+		(phy_id << MII_DATA_PA_SHIFT) |
+		BIT(MII_DATA_SB_SHIFT) |
+		(MII_DATA_OP_WRITE << MII_DATA_OP_SHIFT) |
+		((u32)(val) & MII_DATA_MASK);
+
+	writel(cmd, priv->base + MII_DATA_OFFSET);
+
+	rc = iproc_mdio_wait_for_idle(priv->base);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int iproc_mdio_probe(struct platform_device *pdev)
+{
+	struct iproc_mdio_priv *priv;
+	struct mii_bus *bus;
+	struct resource *res;
+	int rc;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base)) {
+		dev_err(&pdev->dev, "failed to ioremap register\n");
+		return PTR_ERR(priv->base);
+	}
+
+	priv->mii_bus = mdiobus_alloc();
+	if (!priv->mii_bus) {
+		dev_err(&pdev->dev, "MDIO bus alloc failed\n");
+		return -ENOMEM;
+	}
+
+	bus = priv->mii_bus;
+	bus->priv = priv;
+	bus->name = "iProc MDIO bus";
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
+	bus->parent = &pdev->dev;
+	bus->read = iproc_mdio_read;
+	bus->write = iproc_mdio_write;
+
+	rc = of_mdiobus_register(bus, pdev->dev.of_node);
+	if (rc) {
+		dev_err(&pdev->dev, "MDIO bus registration failed\n");
+		goto err_iproc_mdio;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	dev_info(&pdev->dev, "Broadcom iProc MDIO bus at 0x%p\n", priv->base);
+
+	return 0;
+
+err_iproc_mdio:
+	mdiobus_free(bus);
+	return rc;
+}
+
+static int iproc_mdio_remove(struct platform_device *pdev)
+{
+	struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
+
+	mdiobus_unregister(priv->mii_bus);
+	mdiobus_free(priv->mii_bus);
+
+	return 0;
+}
+
+static const struct of_device_id iproc_mdio_of_match[] = {
+	{ .compatible = "brcm,iproc-mdio", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, iproc_mdio_of_match);
+
+static struct platform_driver iproc_mdio_driver = {
+	.driver = {
+		.name = "iproc-mdio",
+		.of_match_table = iproc_mdio_of_match,
+	},
+	.probe = iproc_mdio_probe,
+	.remove = iproc_mdio_remove,
+};
+
+module_platform_driver(iproc_mdio_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom iProc MDIO bus controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:iproc-mdio");
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 3bc9f03..95f51d7 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -25,7 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
-#include <linux/mdio-gpio.h>
+#include <linux/platform_data/mdio-gpio.h>
 
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 12f44c5..88cb459 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -372,6 +372,33 @@
 EXPORT_SYMBOL(mdiobus_scan);
 
 /**
+ * mdiobus_read_nested - Nested version of the mdiobus_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to read
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
+{
+	int retval;
+
+	BUG_ON(in_interrupt());
+
+	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	retval = bus->read(bus, addr, regnum);
+	mutex_unlock(&bus->mdio_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(mdiobus_read_nested);
+
+/**
  * mdiobus_read - Convenience function for reading a given MII mgmt register
  * @bus: the mii_bus struct
  * @addr: the phy address
@@ -396,6 +423,34 @@
 EXPORT_SYMBOL(mdiobus_read);
 
 /**
+ * mdiobus_write_nested - Nested version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * In case of nested MDIO bus access, avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
+{
+	int err;
+
+	BUG_ON(in_interrupt());
+
+	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	err = bus->write(bus, addr, regnum, val);
+	mutex_unlock(&bus->mdio_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(mdiobus_write_nested);
+
+/**
  * mdiobus_write - Convenience function for writing a given MII mgmt register
  * @bus: the mii_bus struct
  * @addr: the phy address
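
As the kernel-doc for the two new helpers notes, the _nested variants are for a bus that is reached through another MDIO bus, where the inner access happens while the outer bus's mdio_lock is already held. A sketch of the intended pattern; the mux structure and its fields are invented for illustration:

/* Hypothetical MDIO mux: its child bus .read callback runs with the
 * child's mdio_lock held, so accesses to the parent bus must use the
 * _nested accessors to avoid lockdep false positives.
 */
static int foo_mux_read(struct mii_bus *child, int addr, int regnum)
{
	struct foo_mux *mux = child->priv;	/* invented driver data */
	int ret;

	/* select this child bus via a register on the parent bus */
	ret = mdiobus_write_nested(mux->parent_bus, mux->sel_phy_addr,
				   mux->sel_reg, mux->bus_id);
	if (ret < 0)
		return ret;

	return mdiobus_read_nested(mux->parent_bus, addr, regnum);
}
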
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 499185e..cf6312f 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -514,6 +514,27 @@
 	return 0;
 }
 
+static int ksz9031_read_status(struct phy_device *phydev)
+{
+	int err;
+	int regval;
+
+	err = genphy_read_status(phydev);
+	if (err)
+		return err;
+
+	/* Make sure the PHY is not broken. Read idle error count,
+	 * and reset the PHY if it is maxed out.
+	 */
+	regval = phy_read(phydev, MII_STAT1000);
+	if ((regval & 0xFF) == 0xFF) {
+		phy_init_hw(phydev);
+		phydev->link = 0;
+	}
+
+	return 0;
+}
+
 static int ksz8873mll_config_aneg(struct phy_device *phydev)
 {
 	return 0;
@@ -772,7 +793,7 @@
 	.driver_data	= &ksz9021_type,
 	.config_init	= ksz9031_config_init,
 	.config_aneg	= genphy_config_aneg,
-	.read_status	= genphy_read_status,
+	.read_status	= ksz9031_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
 	.suspend	= genphy_suspend,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f761288..3833891 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1239,6 +1239,44 @@
 	return 0;
 }
 
+static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+	/* The default values for phydev->supported are provided by the PHY
+	 * driver "features" member, we want to reset to sane defaults first
+	 * before supporting higher speeds.
+	 */
+	phydev->supported &= PHY_DEFAULT_FEATURES;
+
+	switch (max_speed) {
+	default:
+		return -ENOTSUPP;
+	case SPEED_1000:
+		phydev->supported |= PHY_1000BT_FEATURES;
+		/* fall through */
+	case SPEED_100:
+		phydev->supported |= PHY_100BT_FEATURES;
+		/* fall through */
+	case SPEED_10:
+		phydev->supported |= PHY_10BT_FEATURES;
+	}
+
+	return 0;
+}
+
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+{
+	int err;
+
+	err = __set_phy_supported(phydev, max_speed);
+	if (err)
+		return err;
+
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_set_max_speed);
+
 static void of_set_phy_supported(struct phy_device *phydev)
 {
 	struct device_node *node = phydev->dev.of_node;
@@ -1250,25 +1288,8 @@
 	if (!node)
 		return;
 
-	if (!of_property_read_u32(node, "max-speed", &max_speed)) {
-		/* The default values for phydev->supported are provided by the PHY
-		 * driver "features" member, we want to reset to sane defaults fist
-		 * before supporting higher speeds.
-		 */
-		phydev->supported &= PHY_DEFAULT_FEATURES;
-
-		switch (max_speed) {
-		default:
-			return;
-
-		case SPEED_1000:
-			phydev->supported |= PHY_1000BT_FEATURES;
-		case SPEED_100:
-			phydev->supported |= PHY_100BT_FEATURES;
-		case SPEED_10:
-			phydev->supported |= PHY_10BT_FEATURES;
-		}
-	}
+	if (!of_property_read_u32(node, "max-speed", &max_speed))
+		__set_phy_supported(phydev, max_speed);
 }
 
 /**
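
phy_set_max_speed() exposes to MAC drivers the same capability capping that the device tree "max-speed" property already performed. A hedged sketch of a caller; the function and its context are hypothetical:

/* Cap an attached PHY at 100 Mbit/s, e.g. because the MAC-PHY
 * interface cannot sustain gigabit, then restart autonegotiation.
 */
static int foo_mac_limit_phy(struct phy_device *phydev)
{
	int err;

	err = phy_set_max_speed(phydev, SPEED_100);
	if (err)	/* -ENOTSUPP for anything but 10/100/1000 */
		return err;

	return phy_start_aneg(phydev);
}
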
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 70b0895..dc2da87 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,16 +43,25 @@
 
 static int smsc_phy_config_init(struct phy_device *phydev)
 {
+	int __maybe_unused len;
+	struct device *dev __maybe_unused = &phydev->dev;
+	struct device_node *of_node __maybe_unused = dev->of_node;
 	int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+	int enable_energy = 1;
 
 	if (rc < 0)
 		return rc;
 
-	/* Enable energy detect mode for this SMSC Transceivers */
-	rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
-		       rc | MII_LAN83C185_EDPWRDOWN);
-	if (rc < 0)
-		return rc;
+	if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
+		enable_energy = 0;
+
+	if (enable_energy) {
+		/* Enable energy detect mode for these SMSC transceivers */
+		rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+			       rc | MII_LAN83C185_EDPWRDOWN);
+		if (rc < 0)
+			return rc;
+	}
 
 	return smsc_phy_ack_interrupt(phydev);
 }
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 91e1bec..07463fc 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -112,20 +112,7 @@
 },
 };
 
-static int __init teranetics_init(void)
-{
-	return phy_drivers_register(teranetics_driver,
-				    ARRAY_SIZE(teranetics_driver));
-}
-
-static void __exit teranetics_exit(void)
-{
-	return phy_drivers_unregister(teranetics_driver,
-				      ARRAY_SIZE(teranetics_driver));
-}
-
-module_init(teranetics_init);
-module_exit(teranetics_exit);
+module_phy_driver(teranetics_driver);
 
 static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
 	{ PHY_ID_TN2020, 0xffffffff },
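
module_phy_driver() generates the same init/exit pair the patch deletes; roughly, paraphrasing the helper in include/linux/phy.h, the macro expands to:

/* Approximate expansion of module_phy_driver(teranetics_driver) */
static int __init phy_module_init(void)
{
	return phy_drivers_register(teranetics_driver,
				    ARRAY_SIZE(teranetics_driver));
}
module_init(phy_module_init);

static void __exit phy_module_exit(void)
{
	phy_drivers_unregister(teranetics_driver,
			       ARRAY_SIZE(teranetics_driver));
}
module_exit(phy_module_exit);
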
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 3837ae3..5e0b432 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -313,7 +313,6 @@
 			if (po->pppoe_dev == dev &&
 			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
 				pppox_unbind_sock(sk);
-				sk->sk_state = PPPOX_ZOMBIE;
 				sk->sk_state_change(sk);
 				po->pppoe_dev = NULL;
 				dev_put(dev);
@@ -590,7 +589,7 @@
 
 	po = pppox_sk(sk);
 
-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+	if (po->pppoe_dev) {
 		dev_put(po->pppoe_dev);
 		po->pppoe_dev = NULL;
 	}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 686f37d..fc69e41 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -169,6 +169,7 @@
 {
 	struct sock *sk = (struct sock *) chan->private;
 	struct pppox_sock *po = pppox_sk(sk);
+	struct net *net = sock_net(sk);
 	struct pptp_opt *opt = &po->proto.pptp;
 	struct pptp_gre_header *hdr;
 	unsigned int header_len = sizeof(*hdr);
@@ -187,7 +188,7 @@
 	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
 		goto tx_error;
 
-	rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
+	rt = ip_route_output_ports(net, &fl4, NULL,
 				   opt->dst_addr.sin_addr.s_addr,
 				   opt->src_addr.sin_addr.s_addr,
 				   0, 0, IPPROTO_GRE,
@@ -279,10 +280,10 @@
 	nf_reset(skb);
 
 	skb->ip_summed = CHECKSUM_NONE;
-	ip_select_ident(sock_net(sk), skb, NULL);
+	ip_select_ident(net, skb, NULL);
 	ip_send_check(iph);
 
-	ip_local_out(skb);
+	ip_local_out(net, skb->sk, skb);
 	return 1;
 
 tx_error:
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 976aa97..b1878fa 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -858,7 +858,7 @@
 	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 		goto drop;
 
-	if (skb->sk) {
+	if (skb->sk && sk_fullsock(skb->sk)) {
 		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
 		sw_tx_timestamp(skb);
 	}
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3a8a36c..7f83504 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -166,6 +166,7 @@
 	    * Aten UC210T
 	    * ASIX AX88172
 	    * Billionton Systems, USB2AR
+	    * Billionton Systems, GUSB2AM-1G-B
 	    * Buffalo LUA-U2-KTX
 	    * Corega FEther USB2-TX
 	    * D-Link DUB-E100
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 5d049d0..a2d3ea6 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -168,7 +168,7 @@
 struct asix_rx_fixup_info {
 	struct sk_buff *ax_skb;
 	u32 header;
-	u16 size;
+	u16 remaining;
 	bool split_head;
 };
 
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 75d6f26..bd9acff 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -54,69 +54,101 @@
 			   struct asix_rx_fixup_info *rx)
 {
 	int offset = 0;
+	u16 size;
+
+	/* When an Ethernet frame spans multiple URB socket buffers,
+	 * do a sanity test for the Data header synchronisation.
+	 * Attempt to detect whether the previous socket buffer was
+	 * truncated or a socket buffer went missing. Either situation
+	 * creates a discontinuity in the data stream, so we must avoid
+	 * appending bad data to the end of the current netdev socket
+	 * buffer while also not unnecessarily discarding a good current
+	 * netdev socket buffer.
+	 */
+	if (rx->remaining && (rx->remaining + sizeof(u32) <= skb->len)) {
+		offset = ((rx->remaining + 1) & 0xfffe) + sizeof(u32);
+		rx->header = get_unaligned_le32(skb->data + offset);
+		offset = 0;
+
+		size = (u16)(rx->header & 0x7ff);
+		if (size != ((~rx->header >> 16) & 0x7ff)) {
+			netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
+				   rx->remaining);
+			if (rx->ax_skb) {
+				kfree_skb(rx->ax_skb);
+				rx->ax_skb = NULL;
+				/* Discard the incomplete netdev Ethernet frame
+				 * and assume the Data header is at the start of
+				 * the current URB socket buffer.
+				 */
+			}
+			rx->remaining = 0;
+		}
+	}
 
 	while (offset + sizeof(u16) <= skb->len) {
-		u16 remaining = 0;
+		u16 copy_length;
 		unsigned char *data;
 
-		if (!rx->size) {
-			if ((skb->len - offset == sizeof(u16)) ||
-			    rx->split_head) {
-				if(!rx->split_head) {
-					rx->header = get_unaligned_le16(
-							skb->data + offset);
-					rx->split_head = true;
-					offset += sizeof(u16);
-					break;
-				} else {
-					rx->header |= (get_unaligned_le16(
-							skb->data + offset)
-							<< 16);
-					rx->split_head = false;
-					offset += sizeof(u16);
-				}
+		if (!rx->remaining) {
+			if (skb->len - offset == sizeof(u16)) {
+				rx->header = get_unaligned_le16(
+						skb->data + offset);
+				rx->split_head = true;
+				offset += sizeof(u16);
+				break;
+			}
+
+			if (rx->split_head) {
+				rx->header |= (get_unaligned_le16(
+						skb->data + offset) << 16);
+				rx->split_head = false;
+				offset += sizeof(u16);
 			} else {
 				rx->header = get_unaligned_le32(skb->data +
 								offset);
 				offset += sizeof(u32);
 			}
 
-			/* get the packet length */
-			rx->size = (u16) (rx->header & 0x7ff);
-			if (rx->size != ((~rx->header >> 16) & 0x7ff)) {
+			/* take frame length from Data header 32-bit word */
+			size = (u16)(rx->header & 0x7ff);
+			if (size != ((~rx->header >> 16) & 0x7ff)) {
 				netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
 					   rx->header, offset);
-				rx->size = 0;
 				return 0;
 			}
-			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net,
-							       rx->size);
-			if (!rx->ax_skb)
+			if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
+				netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+					   size);
 				return 0;
+			}
+
+			/* Allocating a netdev socket buffer can fail, but
+			 * keep processing the URB socket buffer anyway so
+			 * that synchronisation of the Ethernet frame Data
+			 * header word is maintained.
+			 */
+			rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+
+			rx->remaining = size;
 		}
 
-		if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
-			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
-				   rx->size);
-			kfree_skb(rx->ax_skb);
-			rx->ax_skb = NULL;
-			rx->size = 0U;
-
-			return 0;
+		if (rx->remaining > skb->len - offset) {
+			copy_length = skb->len - offset;
+			rx->remaining -= copy_length;
+		} else {
+			copy_length = rx->remaining;
+			rx->remaining = 0;
 		}
 
-		if (rx->size > skb->len - offset) {
-			remaining = rx->size - (skb->len - offset);
-			rx->size = skb->len - offset;
+		if (rx->ax_skb) {
+			data = skb_put(rx->ax_skb, copy_length);
+			memcpy(data, skb->data + offset, copy_length);
+			if (!rx->remaining)
+				usbnet_skb_return(dev, rx->ax_skb);
 		}
 
-		data = skb_put(rx->ax_skb, rx->size);
-		memcpy(data, skb->data + offset, rx->size);
-		if (!remaining)
-			usbnet_skb_return(dev, rx->ax_skb);
-
-		offset += (rx->size + 1) & 0xfffe;
-		rx->size = remaining;
+		offset += (copy_length + 1) & 0xfffe;
 	}
 
 	if (skb->len != offset) {
@@ -556,7 +588,6 @@
 	usbnet_get_drvinfo(net, info);
 	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
 	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
-	info->eedump_len = AX_EEPROM_LEN;
 }
 
 int asix_set_mac_address(struct net_device *net, void *p)
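
The resynchronisation test added above relies on the ASIX Data header format: a 32-bit little-endian word whose low 11 bits hold the frame length and whose bits 26:16 hold the bitwise complement of that length. Distilled into a standalone predicate (the helper name is invented):

/* A Data header word is plausible only if the 11-bit length in its
 * low half matches the inverted 11-bit length in its high half.
 */
static bool asix_rx_header_valid(u32 header)
{
	u16 size = (u16)(header & 0x7ff);

	return size == ((~header >> 16) & 0x7ff);
}
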
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 1173a24..5cabefc 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -959,6 +959,10 @@
 	USB_DEVICE (0x08dd, 0x90ff),
 	.driver_info =  (unsigned long) &ax8817x_info,
 }, {
+	// Billionton Systems, GUSB2AM-1G-B
+	USB_DEVICE(0x08dd, 0x0114),
+	.driver_info =  (unsigned long) &ax88178_info,
+}, {
 	// ATEN UC210T
 	USB_DEVICE (0x0557, 0x2009),
 	.driver_info =  (unsigned long) &ax8817x_info,
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 6e9c344..0b4bdd3 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -258,7 +258,6 @@
 {
 	/* Inherit standard device info */
 	usbnet_get_drvinfo(net, info);
-	info->eedump_len = DM_EEPROM_LEN;
 }
 
 static u32 dm9601_get_link(struct net_device *net)
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 82d844a..4f345bd 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -445,7 +445,6 @@
 static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo)
 {
 	usbnet_get_drvinfo(net, drvinfo);
-	drvinfo->regdump_len = mcs7830_get_regs_len(net);
 }
 
 static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data)
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 4752e69..75ae756 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -711,6 +711,10 @@
 	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
+	{QMI_FIXED_INTF(0x1199, 0x9070, 8)},	/* Sierra Wireless MC74xx/EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9070, 10)},	/* Sierra Wireless MC74xx/EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx/EM74xx */
+	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx/EM74xx */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 953de13..a50df0d 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -470,14 +470,10 @@
 static void sr_get_drvinfo(struct net_device *net,
 				 struct ethtool_drvinfo *info)
 {
-	struct usbnet *dev = netdev_priv(net);
-	struct sr_data *data = (struct sr_data *)&dev->data;
-
 	/* Inherit standard device info */
 	usbnet_get_drvinfo(net, info);
 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
-	info->eedump_len = data->eeprom_len;
 }
 
 static u32 sr_get_link(struct net_device *net)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index a681569..9ba11d7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -214,10 +214,6 @@
 
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS);
-	drvinfo->testinfo_len = 0;
-	drvinfo->eedump_len   = 0;
-	drvinfo->regdump_len  = vmxnet3_get_regs_len(netdev);
 }
 
 
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 4ecb3a3..92fa3e1 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -30,20 +30,38 @@
 #include <net/arp.h>
 #include <net/ip.h>
 #include <net/ip_fib.h>
+#include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 #include <net/rtnetlink.h>
 #include <net/route.h>
 #include <net/addrconf.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
+
+#define RT_FL_TOS(oldflp4) \
+	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define DRV_NAME	"vrf"
 #define DRV_VERSION	"1.0"
 
-#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)
-
 #define vrf_master_get_rcu(dev) \
 	((struct net_device *)rcu_dereference(dev->rx_handler_data))
 
+struct slave {
+	struct list_head        list;
+	struct net_device       *dev;
+};
+
+struct slave_queue {
+	struct list_head        all_slaves;
+};
+
+struct net_vrf {
+	struct slave_queue      queue;
+	struct rtable           *rth;
+	struct rt6_info		*rt6;
+	u32                     tb_id;
+};
+
 struct pcpu_dstats {
 	u64			tx_pkts;
 	u64			tx_bytes;
@@ -58,9 +76,9 @@
 	return dst;
 }
 
-static int vrf_ip_local_out(struct sk_buff *skb)
+static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	return ip_local_out(skb);
+	return ip_local_out(net, sk, skb);
 }
 
 static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
@@ -88,12 +106,56 @@
 	.default_advmss	= vrf_default_advmss,
 };
 
+/* Neighbor handling is done with the actual device, so we do not
+ * want to flip skb->dev for those ndisc packets. This really fails
+ * when extension headers precede the ICMPv6 header (e.g.,
+ * NEXTHDR_HOP), but it is a start.
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+static bool check_ipv6_frame(const struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data;
+	size_t hlen = sizeof(*ipv6h);
+	bool rc = true;
+
+	if (skb->len < hlen)
+		goto out;
+
+	if (ipv6h->nexthdr == NEXTHDR_ICMP) {
+		const struct icmp6hdr *icmph;
+
+		if (skb->len < hlen + sizeof(*icmph))
+			goto out;
+
+		icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
+		switch (icmph->icmp6_type) {
+		case NDISC_ROUTER_SOLICITATION:
+		case NDISC_ROUTER_ADVERTISEMENT:
+		case NDISC_NEIGHBOUR_SOLICITATION:
+		case NDISC_NEIGHBOUR_ADVERTISEMENT:
+		case NDISC_REDIRECT:
+			rc = false;
+			break;
+		}
+	}
+
+out:
+	return rc;
+}
+#else
+static bool check_ipv6_frame(const struct sk_buff *skb)
+{
+	return false;
+}
+#endif
+
 static bool is_ip_rx_frame(struct sk_buff *skb)
 {
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-	case htons(ETH_P_IPV6):
 		return true;
+	case htons(ETH_P_IPV6):
+		return check_ipv6_frame(skb);
 	}
 	return false;
 }
@@ -153,12 +215,53 @@
 	return stats;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct net *net = dev_net(skb->dev);
+	struct flowi6 fl6 = {
+		/* needed to match OIF rule */
+		.flowi6_oif = dev->ifindex,
+		.flowi6_iif = LOOPBACK_IFINDEX,
+		.daddr = iph->daddr,
+		.saddr = iph->saddr,
+		.flowlabel = ip6_flowinfo(iph),
+		.flowi6_mark = skb->mark,
+		.flowi6_proto = iph->nexthdr,
+		.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
+	};
+	int ret = NET_XMIT_DROP;
+	struct dst_entry *dst;
+	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+
+	dst = ip6_route_output(net, NULL, &fl6);
+	if (dst == dst_null)
+		goto err;
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst);
+
+	ret = ip6_local_out(net, skb->sk, skb);
+	if (unlikely(net_xmit_eval(ret)))
+		dev->stats.tx_errors++;
+	else
+		ret = NET_XMIT_SUCCESS;
+
+	return ret;
+err:
+	vrf_tx_error(dev, skb);
+	return NET_XMIT_DROP;
+}
+#else
 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 					   struct net_device *dev)
 {
 	vrf_tx_error(dev, skb);
 	return NET_XMIT_DROP;
 }
+#endif
 
 static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
 			    struct net_device *vrf_dev)
@@ -193,7 +296,7 @@
 		.flowi4_oif = vrf_dev->ifindex,
 		.flowi4_iif = LOOPBACK_IFINDEX,
 		.flowi4_tos = RT_TOS(ip4h->tos),
-		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
+		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
 				FLOWI_FLAG_SKIP_NH_OIF,
 		.daddr = ip4h->daddr,
 	};
@@ -206,7 +309,7 @@
 					       RT_SCOPE_LINK);
 	}
 
-	ret = ip_local_out(skb);
+	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		vrf_dev->stats.tx_errors++;
 	else
@@ -253,6 +356,157 @@
 	return ret;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
+{
+	return dst;
+}
+
+static struct dst_ops vrf_dst_ops6 = {
+	.family		= AF_INET6,
+	.local_out	= ip6_local_out,
+	.check		= vrf_ip6_check,
+	.mtu		= vrf_v4_mtu,
+	.destroy	= vrf_dst_destroy,
+	.default_advmss	= vrf_default_advmss,
+};
+
+static int init_dst_ops6_kmem_cachep(void)
+{
+	vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
+						     sizeof(struct rt6_info),
+						     0,
+						     SLAB_HWCACHE_ALIGN,
+						     NULL);
+
+	if (!vrf_dst_ops6.kmem_cachep)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void free_dst_ops6_kmem_cachep(void)
+{
+	kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
+}
+
+static int vrf_input6(struct sk_buff *skb)
+{
+	skb->dev->stats.rx_errors++;
+	kfree_skb(skb);
+	return 0;
+}
+
+/* modelled after ip6_finish_output2 */
+static int vrf_finish_output6(struct net *net, struct sock *sk,
+			      struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct net_device *dev = dst->dev;
+	struct neighbour *neigh;
+	struct in6_addr *nexthop;
+	int ret;
+
+	skb->protocol = htons(ETH_P_IPV6);
+	skb->dev = dev;
+
+	rcu_read_lock_bh();
+	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+	if (unlikely(!neigh))
+		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
+	if (!IS_ERR(neigh)) {
+		ret = dst_neigh_output(dst, neigh, skb);
+		rcu_read_unlock_bh();
+		return ret;
+	}
+	rcu_read_unlock_bh();
+
+	IP6_INC_STATS(dev_net(dst->dev),
+		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+/* modelled after ip6_output */
+static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+			    net, sk, skb, NULL, skb_dst(skb)->dev,
+			    vrf_finish_output6,
+			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+}
+
+static void vrf_rt6_destroy(struct net_vrf *vrf)
+{
+	dst_destroy(&vrf->rt6->dst);
+	free_percpu(vrf->rt6->rt6i_pcpu);
+	vrf->rt6 = NULL;
+}
+
+static int vrf_rt6_create(struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct dst_entry *dst;
+	struct rt6_info *rt6;
+	int cpu;
+	int rc = -ENOMEM;
+
+	rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
+			DST_OBSOLETE_NONE,
+			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	if (!rt6)
+		goto out;
+
+	dst = &rt6->dst;
+
+	rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
+	if (!rt6->rt6i_pcpu) {
+		dst_destroy(dst);
+		goto out;
+	}
+	for_each_possible_cpu(cpu) {
+		struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
+		*p = NULL;
+	}
+
+	memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
+
+	INIT_LIST_HEAD(&rt6->rt6i_siblings);
+	INIT_LIST_HEAD(&rt6->rt6i_uncached);
+
+	rt6->dst.input	= vrf_input6;
+	rt6->dst.output	= vrf_output6;
+
+	rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
+
+	atomic_set(&rt6->dst.__refcnt, 2);
+
+	vrf->rt6 = rt6;
+	rc = 0;
+out:
+	return rc;
+}
+#else
+static int init_dst_ops6_kmem_cachep(void)
+{
+	return 0;
+}
+
+static void free_dst_ops6_kmem_cachep(void)
+{
+}
+
+static void vrf_rt6_destroy(struct net_vrf *vrf)
+{
+}
+
+static int vrf_rt6_create(struct net_device *dev)
+{
+	return 0;
+}
+#endif
+
 /* modelled after ip_finish_output2 */
 static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
@@ -296,10 +550,9 @@
 	return ret;
 }
 
-static int vrf_output(struct sock *sk, struct sk_buff *skb)
+static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
-	struct net *net = dev_net(dev);
 
 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
@@ -395,18 +648,15 @@
 
 static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
 	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct slave_queue *queue = &vrf->queue;
 	int ret = -ENOMEM;
 
-	if (!slave || !vrf_ptr)
+	if (!slave)
 		goto out_fail;
 
 	slave->dev = port_dev;
-	vrf_ptr->ifindex = dev->ifindex;
-	vrf_ptr->tb_id = vrf->tb_id;
 
 	/* register the packet handler for slave ports */
 	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
@@ -421,9 +671,8 @@
 	if (ret < 0)
 		goto out_unregister;
 
-	port_dev->flags |= IFF_SLAVE;
+	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
 	__vrf_insert_slave(queue, slave);
-	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
 	cycle_netdev(port_dev);
 
 	return 0;
@@ -431,14 +680,13 @@
 out_unregister:
 	netdev_rx_handler_unregister(port_dev);
 out_fail:
-	kfree(vrf_ptr);
 	kfree(slave);
 	return ret;
 }
 
 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
 		return -EINVAL;
 
 	return do_vrf_add_slave(dev, port_dev);
@@ -447,21 +695,15 @@
 /* inverse of do_vrf_add_slave */
 static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
 {
-	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
 	struct net_vrf *vrf = netdev_priv(dev);
 	struct slave_queue *queue = &vrf->queue;
 	struct slave *slave;
 
-	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
-
 	netdev_upper_dev_unlink(port_dev, dev);
-	port_dev->flags &= ~IFF_SLAVE;
+	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
 
 	netdev_rx_handler_unregister(port_dev);
 
-	/* after netdev_rx_handler_unregister for synchronize_rcu */
-	kfree(vrf_ptr);
-
 	cycle_netdev(port_dev);
 
 	slave = __vrf_find_slave_dev(queue, port_dev);
@@ -486,6 +728,7 @@
 	struct slave *slave, *next;
 
 	vrf_rtable_destroy(vrf);
+	vrf_rt6_destroy(vrf);
 
 	list_for_each_entry_safe(slave, next, head, list)
 		vrf_del_slave(dev, slave->dev);
@@ -509,10 +752,15 @@
 	if (!vrf->rth)
 		goto out_stats;
 
+	if (vrf_rt6_create(dev) != 0)
+		goto out_rth;
+
 	dev->flags = IFF_MASTER | IFF_NOARP;
 
 	return 0;
 
+out_rth:
+	vrf_rtable_destroy(vrf);
 out_stats:
 	free_percpu(dev->dstats);
 	dev->dstats = NULL;
@@ -529,6 +777,85 @@
 	.ndo_del_slave		= vrf_del_slave,
 };
 
+static u32 vrf_fib_table(const struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	return vrf->tb_id;
+}
+
+static struct rtable *vrf_get_rtable(const struct net_device *dev,
+				     const struct flowi4 *fl4)
+{
+	struct rtable *rth = NULL;
+
+	if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
+		struct net_vrf *vrf = netdev_priv(dev);
+
+		rth = vrf->rth;
+		atomic_inc(&rth->dst.__refcnt);
+	}
+
+	return rth;
+}
+
+/* called under rcu_read_lock */
+static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
+{
+	struct fib_result res = { .tclassid = 0 };
+	struct net *net = dev_net(dev);
+	u32 orig_tos = fl4->flowi4_tos;
+	u8 flags = fl4->flowi4_flags;
+	u8 scope = fl4->flowi4_scope;
+	u8 tos = RT_FL_TOS(fl4);
+
+	if (unlikely(!fl4->daddr))
+		return;
+
+	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+	fl4->flowi4_iif = LOOPBACK_IFINDEX;
+	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
+			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
+
+	if (!fib_lookup(net, fl4, &res, 0)) {
+		if (res.type == RTN_LOCAL)
+			fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
+		else
+			fib_select_path(net, &res, fl4, -1);
+	}
+
+	fl4->flowi4_flags = flags;
+	fl4->flowi4_tos = orig_tos;
+	fl4->flowi4_scope = scope;
+}
+
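
[Editor's note] vrf_get_saddr() has to mutate the caller's flow key for the FIB lookup (forcing tos, scope, and the skip-nexthop-oif flag) and then put every field back, keeping only the discovered source address. A hedged standalone sketch of that save/mutate/restore idiom, with a faked-up lookup standing in for fib_lookup():

	#include <stdio.h>

	struct flow_key {
		unsigned char tos;
		unsigned char scope;
		unsigned int  flags;
		unsigned int  saddr;
	};

	/* Stand-in for fib_lookup(): pretend the lookup picks a source. */
	static int fake_lookup(struct flow_key *k)
	{
		k->saddr = 0x0a000001;	/* 10.0.0.1, made up for the demo */
		return 0;
	}

	static void get_saddr(struct flow_key *k)
	{
		unsigned char tos = k->tos, scope = k->scope;
		unsigned int flags = k->flags;

		k->flags |= 0x4;	/* e.g. "skip nexthop oif" */
		k->scope = 0;

		fake_lookup(k);		/* may set k->saddr */

		/* restore everything the caller set, keep only saddr */
		k->tos = tos;
		k->scope = scope;
		k->flags = flags;
	}

	int main(void)
	{
		struct flow_key k = { .tos = 0x10, .scope = 2, .flags = 0x1 };

		get_saddr(&k);
		printf("saddr=%#x flags=%#x\n", k.saddr, k.flags);
		return 0;
	}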
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
+					 const struct flowi6 *fl6)
+{
+	struct rt6_info *rt = NULL;
+
+	if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
+		struct net_vrf *vrf = netdev_priv(dev);
+
+		rt = vrf->rt6;
+		atomic_inc(&rt->dst.__refcnt);
+	}
+
+	return (struct dst_entry *)rt;
+}
+#endif
+
+static const struct l3mdev_ops vrf_l3mdev_ops = {
+	.l3mdev_fib_table	= vrf_fib_table,
+	.l3mdev_get_rtable	= vrf_get_rtable,
+	.l3mdev_get_saddr	= vrf_get_saddr,
+#if IS_ENABLED(CONFIG_IPV6)
+	.l3mdev_get_rt6_dst	= vrf_get_rt6_dst,
+#endif
+};
+
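
[Editor's note] The l3mdev_ops table is a plain function-pointer vtable: core code calls through it without knowing the device is a VRF. A hypothetical miniature of that dispatch pattern (names invented for the sketch):

	#include <stdio.h>

	struct l3_ops {
		unsigned int (*fib_table)(const void *priv);
	};

	struct device {
		const struct l3_ops *ops;
		void *priv;
	};

	static unsigned int vrf_table(const void *priv)
	{
		return *(const unsigned int *)priv;
	}

	static const struct l3_ops vrf_ops = { .fib_table = vrf_table };

	/* Core-code helper: dispatch through the table when provided. */
	static unsigned int dev_fib_table(const struct device *dev)
	{
		if (dev->ops && dev->ops->fib_table)
			return dev->ops->fib_table(dev->priv);
		return 0;	/* "unspecified table" default */
	}

	int main(void)
	{
		unsigned int tb_id = 10;
		struct device dev = { .ops = &vrf_ops, .priv = &tb_id };

		printf("table %u\n", dev_fib_table(&dev));
		return 0;
	}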
 static void vrf_get_drvinfo(struct net_device *dev,
 			    struct ethtool_drvinfo *info)
 {
@@ -546,6 +873,7 @@
 
 	/* Initialize the device structure. */
 	dev->netdev_ops = &vrf_netdev_ops;
+	dev->l3mdev_ops = &vrf_l3mdev_ops;
 	dev->ethtool_ops = &vrf_ethtool_ops;
 	dev->destructor = free_netdev;
 
@@ -572,10 +900,6 @@
 
 static void vrf_dellink(struct net_device *dev, struct list_head *head)
 {
-	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
-
-	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
-	kfree_rcu(vrf_ptr, rcu);
 	unregister_netdevice_queue(dev, head);
 }
 
@@ -583,7 +907,6 @@
 		       struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_vrf *vrf = netdev_priv(dev);
-	struct net_vrf_dev *vrf_ptr;
 	int err;
 
 	if (!data || !data[IFLA_VRF_TABLE])
@@ -591,26 +914,15 @@
 
 	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
 
-	dev->priv_flags |= IFF_VRF_MASTER;
-
-	err = -ENOMEM;
-	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
-	if (!vrf_ptr)
-		goto out_fail;
-
-	vrf_ptr->ifindex = dev->ifindex;
-	vrf_ptr->tb_id = vrf->tb_id;
+	dev->priv_flags |= IFF_L3MDEV_MASTER;
 
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto out_fail;
 
-	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
-
 	return 0;
 
 out_fail:
-	kfree(vrf_ptr);
 	free_netdev(dev);
 	return err;
 }
@@ -654,10 +966,9 @@
 
 	/* only care about unregister events to drop slave references */
 	if (event == NETDEV_UNREGISTER) {
-		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
 		struct net_device *vrf_dev;
 
-		if (!vrf_ptr || netif_is_vrf(dev))
+		if (!netif_is_l3_slave(dev))
 			goto out;
 
 		vrf_dev = netdev_master_upper_dev_get(dev);
@@ -684,6 +995,10 @@
 	if (!vrf_dst_ops.kmem_cachep)
 		return -ENOMEM;
 
+	rc = init_dst_ops6_kmem_cachep();
+	if (rc != 0)
+		goto error2;
+
 	register_netdevice_notifier(&vrf_notifier_block);
 
 	rc = rtnl_link_register(&vrf_link_ops);
@@ -694,6 +1009,8 @@
 
 error:
 	unregister_netdevice_notifier(&vrf_notifier_block);
+	free_dst_ops6_kmem_cachep();
+error2:
 	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
 	return rc;
 }
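
[Editor's note] Note the label ordering in the new error path above: free_dst_ops6_kmem_cachep() sits above error2, so a failure at any stage unwinds exactly what was already set up, in reverse order of setup. A small standalone sketch of the goto-unwind idiom (userspace C, with the last step forced to fail for the demo):

	#include <stdio.h>
	#include <stdlib.h>

	static int init_a(void **p) { *p = malloc(16); return *p ? 0 : -1; }
	static int init_b(void **p) { *p = malloc(16); return *p ? 0 : -1; }
	static int init_c(void)     { return -1; /* forced failure */ }

	static int module_init_demo(void)
	{
		void *a, *b;
		int rc;

		rc = init_a(&a);
		if (rc)
			return rc;

		rc = init_b(&b);
		if (rc)
			goto err_a;

		rc = init_c();
		if (rc)
			goto err_b;

		return 0;

	err_b:			/* undo in reverse order of setup */
		free(b);
	err_a:
		free(a);
		return rc;
	}

	int main(void)
	{
		printf("rc=%d\n", module_init_demo());
		return 0;
	}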
@@ -703,6 +1020,7 @@
 	rtnl_link_unregister(&vrf_link_ops);
 	unregister_netdevice_notifier(&vrf_notifier_block);
 	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+	free_dst_ops6_kmem_cachep();
 }
 
 module_init(vrf_init_module);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ce704df..6369a57 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2360,6 +2360,46 @@
 	return 0;
 }
 
+static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
+				struct ip_tunnel_info *info,
+				__be16 sport, __be16 dport)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct rtable *rt;
+	struct flowi4 fl4;
+
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.flowi4_tos = RT_TOS(info->key.tos);
+	fl4.flowi4_mark = skb->mark;
+	fl4.flowi4_proto = IPPROTO_UDP;
+	fl4.daddr = info->key.u.ipv4.dst;
+
+	rt = ip_route_output_key(vxlan->net, &fl4);
+	if (IS_ERR(rt))
+		return PTR_ERR(rt);
+	ip_rt_put(rt);
+
+	info->key.u.ipv4.src = fl4.saddr;
+	info->key.tp_src = sport;
+	info->key.tp_dst = dport;
+	return 0;
+}
+
+static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+	struct vxlan_dev *vxlan = netdev_priv(dev);
+	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+	__be16 sport, dport;
+
+	sport = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+				  vxlan->cfg.port_max, true);
+	dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
+
+	if (ip_tunnel_info_af(info) == AF_INET)
+		return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+	return -EINVAL;
+}
+
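
[Editor's note] ndo_fill_metadata_dst lets a metadata-mode tunnel answer "which source address and ports would this packet get?" before anything is encapsulated; egress_ipv4_tun_info() does that by asking the FIB via ip_route_output_key() and immediately releasing the route. A rough userspace analogue of that lookup-only trick, using a connected UDP socket (nothing is transmitted):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in dst = { .sin_family = AF_INET,
					   .sin_port = htons(4789) }; /* VXLAN */
		struct sockaddr_in src;
		socklen_t slen = sizeof(src);
		char buf[INET_ADDRSTRLEN];
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* example */

		/* connect() on UDP only consults the routing table */
		if (fd < 0 ||
		    connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 ||
		    getsockname(fd, (struct sockaddr *)&src, &slen) < 0) {
			perror("route lookup");
			return 1;
		}
		printf("chosen source: %s\n",
		       inet_ntop(AF_INET, &src.sin_addr, buf, sizeof(buf)));
		close(fd);
		return 0;
	}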
 static const struct net_device_ops vxlan_netdev_ops = {
 	.ndo_init		= vxlan_init,
 	.ndo_uninit		= vxlan_uninit,
@@ -2374,6 +2414,7 @@
 	.ndo_fdb_add		= vxlan_fdb_add,
 	.ndo_fdb_del		= vxlan_fdb_delete,
 	.ndo_fdb_dump		= vxlan_fdb_dump,
+	.ndo_fill_metadata_dst	= vxlan_fill_metadata_dst,
 };
 
 /* Info for udev, that this is a virtual tunnel endpoint */
@@ -2794,11 +2835,10 @@
 	struct vxlan_config conf;
 	int err;
 
-	if (!data[IFLA_VXLAN_ID])
-		return -EINVAL;
-
 	memset(&conf, 0, sizeof(conf));
-	conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+	if (data[IFLA_VXLAN_ID])
+		conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
 
 	if (data[IFLA_VXLAN_GROUP]) {
 		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index a63ab2e..f9f9422 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -214,8 +214,6 @@
 
 	  If you choose to build a module, it'll be called rndis_wlan.
 
-source "drivers/net/wireless/rtl818x/Kconfig"
-
 config ADM8211
 	tristate "ADMtek ADM8211 support"
 	depends on MAC80211 && PCI
@@ -243,6 +241,8 @@
 
 	  Thanks to Infineon-ADMtek for their support of this driver.
 
+source "drivers/net/wireless/realtek/rtl818x/Kconfig"
+
 config MAC80211_HWSIM
 	tristate "Simulated radio testing tool for mac80211"
 	depends on MAC80211
@@ -278,7 +278,8 @@
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
 source "drivers/net/wireless/mediatek/Kconfig"
-source "drivers/net/wireless/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
+source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 6b9e729..740fdd3 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -22,9 +22,7 @@
 obj-$(CONFIG_B43)		+= b43/
 obj-$(CONFIG_B43LEGACY)		+= b43legacy/
 obj-$(CONFIG_ZD1211RW)		+= zd1211rw/
-obj-$(CONFIG_RTL8180)		+= rtl818x/
-obj-$(CONFIG_RTL8187)		+= rtl818x/
-obj-$(CONFIG_RTLWIFI)		+= rtlwifi/
+obj-$(CONFIG_WLAN)		+= realtek/
 
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)	+= ray_cs.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index d0c97c2..17c40f0 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1231,12 +1231,13 @@
 	dma_addr_t		shared_dma;
 	pm_message_t		power;
 	SsidRid			*SSID;
-	APListRid		*APList;
+	APListRid		APList;
 #define	PCI_SHARED_LEN		2*MPI_MAX_FIDS*PKTSIZE+RIDSIZE
 	char			proc_name[IFNAMSIZ];
 
 	int			wep_capable;
 	int			max_wep_idx;
+	int			last_auth;
 
 	/* WPA-related stuff */
 	unsigned int bssListFirst;
@@ -1847,11 +1848,6 @@
 	return PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock);
 }
 
-static int readAPListRid(struct airo_info *ai, APListRid *aplr)
-{
-	return PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr), 1);
-}
-
 static int writeAPListRid(struct airo_info *ai, APListRid *aplr, int lock)
 {
 	return PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock);
@@ -2412,7 +2408,6 @@
 
 	kfree(ai->flash);
 	kfree(ai->rssi);
-	kfree(ai->APList);
 	kfree(ai->SSID);
 	if (freeres) {
 		/* PCMCIA frees this stuff, so only for PCI and ISA */
@@ -2808,6 +2803,7 @@
 	init_waitqueue_head (&ai->thr_wait);
 	ai->tfm = NULL;
 	add_airo_dev(ai);
+	ai->APList.len = cpu_to_le16(sizeof(struct APListRid));
 
 	if (airo_networks_allocate (ai))
 		goto err_out_free;
@@ -3041,6 +3037,11 @@
 	}
 
 out:
+	/* write APList back (we cleared it in airo_set_scan) */
+	disable_MAC(ai, 2);
+	writeAPListRid(ai, &ai->APList, 0);
+	enable_MAC(ai, 0);
+
 	ai->scan_timeout = 0;
 	clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
 	up(&ai->sem);
@@ -3266,6 +3267,7 @@
 			wake_up_interruptible(&ai->thr_wait);
 		} else
 			airo_send_event(ai->dev);
+		netif_carrier_on(ai->dev);
 	} else if (!scan_forceloss) {
 		if (auto_wep && !ai->expires) {
 			ai->expires = RUN_AT(3*HZ);
@@ -3276,6 +3278,9 @@
 		eth_zero_addr(wrqu.ap_addr.sa_data);
 		wrqu.ap_addr.sa_family = ARPHRD_ETHER;
 		wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
+		netif_carrier_off(ai->dev);
+	} else {
+		netif_carrier_off(ai->dev);
 	}
 }
 
@@ -3608,16 +3613,18 @@
         Cmd cmd;
 	Resp rsp;
 
-	if (lock && down_interruptible(&ai->sem))
+	if (lock == 1 && down_interruptible(&ai->sem))
 		return;
 
 	if (test_bit(FLAG_ENABLED, &ai->flags)) {
+		if (lock != 2) /* lock == 2 means don't disable carrier */
+			netif_carrier_off(ai->dev);
 		memset(&cmd, 0, sizeof(cmd));
 		cmd.cmd = MAC_DISABLE; // disable in case already enabled
 		issuecommand(ai, &cmd, &rsp);
 		clear_bit(FLAG_ENABLED, &ai->flags);
 	}
-	if (lock)
+	if (lock == 1)
 		up(&ai->sem);
 }
 
@@ -3786,6 +3793,16 @@
 	}
 }
 
+static inline void set_auth_type(struct airo_info *local, int auth_type)
+{
+	local->config.authType = auth_type;
+	/* Cache the last auth type used (of AUTH_OPEN and AUTH_ENCRYPT).
+	 * Used by airo_set_auth()
+	 */
+	if (auth_type == AUTH_OPEN || auth_type == AUTH_ENCRYPT)
+		local->last_auth = auth_type;
+}
+
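
[Editor's note] The point of last_auth shows up later in airo_set_auth(): IW_AUTH_ALG_OPEN_SYSTEM is ambiguous between WEP open-system and fully unencrypted operation, so the driver replays whichever of the two was configured last. A toy standalone model of that caching (values hypothetical):

	#include <stdio.h>

	enum { AUTH_OPEN = 1, AUTH_ENCRYPT = 2, AUTH_SHAREDKEY = 3 };

	static int cur_auth, last_auth = AUTH_OPEN;

	static void set_auth(int t)
	{
		cur_auth = t;
		/* remember only the two modes "open system" can mean */
		if (t == AUTH_OPEN || t == AUTH_ENCRYPT)
			last_auth = t;
	}

	int main(void)
	{
		set_auth(AUTH_ENCRYPT);		/* user enables WEP open */
		set_auth(AUTH_SHAREDKEY);	/* switch to shared key */
		set_auth(last_auth);		/* "open system" again:
						 * restores AUTH_ENCRYPT,
						 * not AUTH_OPEN */
		printf("auth=%d\n", cur_auth);
		return 0;
	}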
 static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
 {
 	Cmd cmd;
@@ -3836,8 +3853,6 @@
 		tdsRssiRid rssi_rid;
 		CapabilityRid cap_rid;
 
-		kfree(ai->APList);
-		ai->APList = NULL;
 		kfree(ai->SSID);
 		ai->SSID = NULL;
 		// general configuration (read/modify/write)
@@ -3862,7 +3877,7 @@
 						"level scale");
 		}
 		ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
-		ai->config.authType = AUTH_OPEN;
+		set_auth_type(ai, AUTH_OPEN);
 		ai->config.modulation = MOD_CCK;
 
 		if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
@@ -4880,13 +4895,13 @@
 			line += 5;
 			switch( line[0] ) {
 			case 's':
-				ai->config.authType = AUTH_SHAREDKEY;
+				set_auth_type(ai, AUTH_SHAREDKEY);
 				break;
 			case 'e':
-				ai->config.authType = AUTH_ENCRYPT;
+				set_auth_type(ai, AUTH_ENCRYPT);
 				break;
 			default:
-				ai->config.authType = AUTH_OPEN;
+				set_auth_type(ai, AUTH_OPEN);
 				break;
 			}
 			set_bit (FLAG_COMMIT, &ai->flags);
@@ -5114,31 +5129,31 @@
 	struct proc_data *data = file->private_data;
 	struct net_device *dev = PDE_DATA(inode);
 	struct airo_info *ai = dev->ml_priv;
-	APListRid APList_rid;
+	APListRid *APList_rid = &ai->APList;
 	int i;
 
 	if ( !data->writelen ) return;
 
-	memset( &APList_rid, 0, sizeof(APList_rid) );
-	APList_rid.len = cpu_to_le16(sizeof(APList_rid));
+	memset(APList_rid, 0, sizeof(*APList_rid));
+	APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
 
 	for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
 		int j;
 		for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
 			switch(j%3) {
 			case 0:
-				APList_rid.ap[i][j/3]=
+				APList_rid->ap[i][j/3]=
 					hex_to_bin(data->wbuffer[j+i*6*3])<<4;
 				break;
 			case 1:
-				APList_rid.ap[i][j/3]|=
+				APList_rid->ap[i][j/3]|=
 					hex_to_bin(data->wbuffer[j+i*6*3]);
 				break;
 			}
 		}
 	}
 	disable_MAC(ai, 1);
-	writeAPListRid(ai, &APList_rid, 1);
+	writeAPListRid(ai, APList_rid, 1);
 	enable_MAC(ai, 1);
 }
 
@@ -5392,7 +5407,7 @@
 	struct airo_info *ai = dev->ml_priv;
 	int i;
 	char *ptr;
-	APListRid APList_rid;
+	APListRid *APList_rid = &ai->APList;
 
 	if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
 		return -ENOMEM;
@@ -5410,13 +5425,12 @@
 	}
 	data->on_close = proc_APList_on_close;
 
-	readAPListRid(ai, &APList_rid);
 	ptr = data->rbuffer;
 	for( i = 0; i < 4; i++ ) {
 // We end when we find a zero MAC
-		if ( !*(int*)APList_rid.ap[i] &&
-		     !*(int*)&APList_rid.ap[i][2]) break;
-		ptr += sprintf(ptr, "%pM\n", APList_rid.ap[i]);
+		if ( !*(int*)APList_rid->ap[i] &&
+		     !*(int*)&APList_rid->ap[i][2]) break;
+		ptr += sprintf(ptr, "%pM\n", APList_rid->ap[i]);
 	}
 	if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
 
@@ -5580,15 +5594,10 @@
 	Cmd cmd;
 	Resp rsp;
 
-	if (!ai->APList)
-		ai->APList = kmalloc(sizeof(APListRid), GFP_KERNEL);
-	if (!ai->APList)
-		return -ENOMEM;
 	if (!ai->SSID)
 		ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL);
 	if (!ai->SSID)
 		return -ENOMEM;
-	readAPListRid(ai, ai->APList);
 	readSsidRid(ai, ai->SSID);
 	memset(&cmd, 0, sizeof(cmd));
 	/* the lock will be released at the end of the resume callback */
@@ -5636,11 +5645,7 @@
 		kfree(ai->SSID);
 		ai->SSID = NULL;
 	}
-	if (ai->APList) {
-		writeAPListRid(ai, ai->APList, 0);
-		kfree(ai->APList);
-		ai->APList = NULL;
-	}
+	writeAPListRid(ai, &ai->APList, 0);
 	writeConfigRid(ai, 0);
 	enable_MAC(ai, 0);
 	ai->power = PMSG_ON;
@@ -5938,7 +5943,7 @@
 	struct airo_info *local = dev->ml_priv;
 	Cmd cmd;
 	Resp rsp;
-	APListRid APList_rid;
+	APListRid *APList_rid = &local->APList;
 
 	if (awrq->sa_family != ARPHRD_ETHER)
 		return -EINVAL;
@@ -5951,11 +5956,11 @@
 		issuecommand(local, &cmd, &rsp);
 		up(&local->sem);
 	} else {
-		memset(&APList_rid, 0, sizeof(APList_rid));
-		APList_rid.len = cpu_to_le16(sizeof(APList_rid));
-		memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN);
+		memset(APList_rid, 0, sizeof(*APList_rid));
+		APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
+		memcpy(APList_rid->ap[0], awrq->sa_data, ETH_ALEN);
 		disable_MAC(local, 1);
-		writeAPListRid(local, &APList_rid, 1);
+		writeAPListRid(local, APList_rid, 1);
 		enable_MAC(local, 1);
 	}
 	return 0;
@@ -6368,9 +6373,8 @@
 		 * should be enabled (user may turn it off later)
 		 * This is also how "iwconfig ethX key on" works */
 		if((index == current_index) && (key.len > 0) &&
-		   (local->config.authType == AUTH_OPEN)) {
-			local->config.authType = AUTH_ENCRYPT;
-		}
+		   (local->config.authType == AUTH_OPEN))
+			set_auth_type(local, AUTH_ENCRYPT);
 	} else {
 		/* Do we want to just set the transmit key index ? */
 		int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
@@ -6389,12 +6393,12 @@
 		}
 	}
 	/* Read the flags */
-	if(dwrq->flags & IW_ENCODE_DISABLED)
-		local->config.authType = AUTH_OPEN;	// disable encryption
+	if (dwrq->flags & IW_ENCODE_DISABLED)
+		set_auth_type(local, AUTH_OPEN);	/* disable encryption */
 	if(dwrq->flags & IW_ENCODE_RESTRICTED)
-		local->config.authType = AUTH_SHAREDKEY;	// Only Both
-	if(dwrq->flags & IW_ENCODE_OPEN)
-		local->config.authType = AUTH_ENCRYPT;	// Only Wep
+		set_auth_type(local, AUTH_SHAREDKEY);	/* Only Both */
+	if (dwrq->flags & IW_ENCODE_OPEN)
+		set_auth_type(local, AUTH_ENCRYPT);	/* Only Wep */
 	/* Commit the changes to flags if needed */
 	if (local->config.authType != currentAuthType)
 		set_bit (FLAG_COMMIT, &local->flags);
@@ -6549,12 +6553,12 @@
 	}
 
 	/* Read the flags */
-	if(encoding->flags & IW_ENCODE_DISABLED)
-		local->config.authType = AUTH_OPEN;	// disable encryption
+	if (encoding->flags & IW_ENCODE_DISABLED)
+		set_auth_type(local, AUTH_OPEN);	/* disable encryption */
 	if(encoding->flags & IW_ENCODE_RESTRICTED)
-		local->config.authType = AUTH_SHAREDKEY;	// Only Both
-	if(encoding->flags & IW_ENCODE_OPEN)
-		local->config.authType = AUTH_ENCRYPT;	// Only Wep
+		set_auth_type(local, AUTH_SHAREDKEY);	/* Only Both */
+	if (encoding->flags & IW_ENCODE_OPEN)
+		set_auth_type(local, AUTH_ENCRYPT);	/* Only Wep */
 	/* Commit the changes to flags if needed */
 	if (local->config.authType != currentAuthType)
 		set_bit (FLAG_COMMIT, &local->flags);
@@ -6659,9 +6663,9 @@
 		if (param->value) {
 			/* Only change auth type if unencrypted */
 			if (currentAuthType == AUTH_OPEN)
-				local->config.authType = AUTH_ENCRYPT;
+				set_auth_type(local, AUTH_ENCRYPT);
 		} else {
-			local->config.authType = AUTH_OPEN;
+			set_auth_type(local, AUTH_OPEN);
 		}
 
 		/* Commit the changes to flags if needed */
@@ -6670,13 +6674,14 @@
 		break;
 
 	case IW_AUTH_80211_AUTH_ALG: {
-			/* FIXME: What about AUTH_OPEN?  This API seems to
-			 * disallow setting our auth to AUTH_OPEN.
-			 */
 			if (param->value & IW_AUTH_ALG_SHARED_KEY) {
-				local->config.authType = AUTH_SHAREDKEY;
+				set_auth_type(local, AUTH_SHAREDKEY);
 			} else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
-				local->config.authType = AUTH_ENCRYPT;
+				/* We can't tell here whether WEP open system
+				 * or unencrypted mode was requested, so fall
+				 * back to whichever of the two was used last.
+				 */
+				set_auth_type(local, local->last_auth);
 			} else
 				return -EINVAL;
 
@@ -7217,6 +7222,7 @@
 	Cmd cmd;
 	Resp rsp;
 	int wake = 0;
+	APListRid APList_rid_empty;
 
 	/* Note : you may have realised that, as this is a SET operation,
 	 * this is privileged and therefore a normal user can't
@@ -7234,6 +7240,13 @@
 	if (ai->scan_timeout > 0)
 		goto out;
 
+	/* Clear APList as it affects scan results */
+	memset(&APList_rid_empty, 0, sizeof(APList_rid_empty));
+	APList_rid_empty.len = cpu_to_le16(sizeof(APList_rid_empty));
+	disable_MAC(ai, 2);
+	writeAPListRid(ai, &APList_rid_empty, 0);
+	enable_MAC(ai, 0);
+
 	/* Initiate a scan command */
 	ai->scan_timeout = RUN_AT(3*HZ);
 	memset(&cmd, 0, sizeof(cmd));
@@ -7489,10 +7502,8 @@
 	 * parameters. It's now time to commit them in the card */
 	disable_MAC(local, 1);
 	if (test_bit (FLAG_RESET, &local->flags)) {
-		APListRid APList_rid;
 		SsidRid SSID_rid;
 
-		readAPListRid(local, &APList_rid);
 		readSsidRid(local, &SSID_rid);
 		if (test_bit(FLAG_MPI,&local->flags))
 			setup_card(local, dev->dev_addr, 1 );
@@ -7500,7 +7511,7 @@
 			reset_airo_card(dev);
 		disable_MAC(local, 1);
 		writeSsidRid(local, &SSID_rid, 1);
-		writeAPListRid(local, &APList_rid, 1);
+		writeAPListRid(local, &local->APList, 1);
 	}
 	if (down_interruptible(&local->sem))
 		return -ERESTARTSYS;
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index df7c761..7d3231a 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -82,6 +82,16 @@
 
 #define BMI_NVRAM_SEG_NAME_SZ 16
 
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK   0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB    10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK    0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB     15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+
 struct bmi_cmd {
 	__le32 id; /* enum bmi_cmd_id */
 	union {
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index cf28fbe..edf3629 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -274,7 +274,7 @@
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
-	struct ce_desc *desc, *sdesc;
+	struct ce_desc *desc, sdesc;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
 	unsigned int write_index = src_ring->write_index;
@@ -294,7 +294,6 @@
 
 	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
 				   write_index);
-	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
 
 	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
 
@@ -303,11 +302,11 @@
 	if (flags & CE_SEND_FLAG_BYTE_SWAP)
 		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
 
-	sdesc->addr   = __cpu_to_le32(buffer);
-	sdesc->nbytes = __cpu_to_le16(nbytes);
-	sdesc->flags  = __cpu_to_le16(desc_flags);
+	sdesc.addr   = __cpu_to_le32(buffer);
+	sdesc.nbytes = __cpu_to_le16(nbytes);
+	sdesc.flags  = __cpu_to_le16(desc_flags);
 
-	*desc = *sdesc;
+	*desc = sdesc;
 
 	src_ring->per_transfer_context[write_index] = per_transfer_context;
 
@@ -413,7 +412,7 @@
 	lockdep_assert_held(&ar_pci->ce_lock);
 
 	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
-		return -EIO;
+		return -ENOSPC;
 
 	desc->addr = __cpu_to_le32(paddr);
 	desc->nbytes = 0;
@@ -579,17 +578,13 @@
  * The caller takes responsibility for any necessary locking.
  */
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp)
+					 void **per_transfer_contextp)
 {
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 	u32 ctrl_addr = ce_state->ctrl_addr;
 	struct ath10k *ar = ce_state->ar;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;
-	struct ce_desc *sdesc, *sbase;
 	unsigned int read_index;
 
 	if (src_ring->hw_index == sw_index) {
@@ -614,15 +609,6 @@
 	if (read_index == sw_index)
 		return -EIO;
 
-	sbase = src_ring->shadow_base;
-	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
-
-	/* Return data from completed source descriptor */
-	*bufferp = __le32_to_cpu(sdesc->addr);
-	*nbytesp = __le16_to_cpu(sdesc->nbytes);
-	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-			   CE_DESC_FLAGS_META_DATA);
-
 	if (per_transfer_contextp)
 		*per_transfer_contextp =
 			src_ring->per_transfer_context[sw_index];
@@ -697,10 +683,7 @@
 }
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp)
+				  void **per_transfer_contextp)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -708,9 +691,7 @@
 
 	spin_lock_bh(&ar_pci->ce_lock);
 	ret = ath10k_ce_completed_send_next_nolock(ce_state,
-						   per_transfer_contextp,
-						   bufferp, nbytesp,
-						   transfer_idp);
+						   per_transfer_contextp);
 	spin_unlock_bh(&ar_pci->ce_lock);
 
 	return ret;
@@ -940,27 +921,6 @@
 			src_ring->base_addr_ce_space_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	/*
-	 * Also allocate a shadow src ring in regular
-	 * mem to use for faster access.
-	 */
-	src_ring->shadow_base_unaligned =
-		kmalloc((nentries * sizeof(struct ce_desc) +
-			 CE_DESC_RING_ALIGN), GFP_KERNEL);
-	if (!src_ring->shadow_base_unaligned) {
-		dma_free_coherent(ar->dev,
-				  (nentries * sizeof(struct ce_desc) +
-				   CE_DESC_RING_ALIGN),
-				  src_ring->base_addr_owner_space,
-				  src_ring->base_addr_ce_space);
-		kfree(src_ring);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	src_ring->shadow_base = PTR_ALIGN(
-			src_ring->shadow_base_unaligned,
-			CE_DESC_RING_ALIGN);
-
 	return src_ring;
 }
 
@@ -1076,9 +1036,7 @@
 }
 
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-			 const struct ce_attr *attr,
-			 void (*send_cb)(struct ath10k_ce_pipe *),
-			 void (*recv_cb)(struct ath10k_ce_pipe *))
+			 const struct ce_attr *attr)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@@ -1104,10 +1062,10 @@
 	ce_state->src_sz_max = attr->src_sz_max;
 
 	if (attr->src_nentries)
-		ce_state->send_cb = send_cb;
+		ce_state->send_cb = attr->send_cb;
 
 	if (attr->dest_nentries)
-		ce_state->recv_cb = recv_cb;
+		ce_state->recv_cb = attr->recv_cb;
 
 	if (attr->src_nentries) {
 		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
@@ -1141,7 +1099,6 @@
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
 	if (ce_state->src_ring) {
-		kfree(ce_state->src_ring->shadow_base_unaligned);
 		dma_free_coherent(ar->dev,
 				  (ce_state->src_ring->nentries *
 				   sizeof(struct ce_desc) +
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 5c903e15..47b734c 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -100,12 +100,6 @@
 
 	/* CE address space */
 	u32 base_addr_ce_space;
-	/*
-	 * Start of shadow copy of descriptors, within regular memory.
-	 * Aligned to descriptor-size boundary.
-	 */
-	void *shadow_base_unaligned;
-	struct ce_desc *shadow_base;
 
 	/* keep last */
 	void *per_transfer_context[0];
@@ -192,16 +186,10 @@
  * Pops 1 completed send buffer from Source ring.
  */
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-				  void **per_transfer_contextp,
-				  u32 *bufferp,
-				  unsigned int *nbytesp,
-				  unsigned int *transfer_idp);
+				  void **per_transfer_contextp);
 
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-					 void **per_transfer_contextp,
-					 u32 *bufferp,
-					 unsigned int *nbytesp,
-					 unsigned int *transfer_idp);
+					 void **per_transfer_contextp);
 
 /*==================CE Engine Initialization=======================*/
 
@@ -209,9 +197,7 @@
 			const struct ce_attr *attr);
 void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
 int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
-			 const struct ce_attr *attr,
-			 void (*send_cb)(struct ath10k_ce_pipe *),
-			 void (*recv_cb)(struct ath10k_ce_pipe *));
+			 const struct ce_attr *attr);
 void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
@@ -277,6 +263,9 @@
 
 	/* #entries in destination ring - Must be a power of 2 */
 	unsigned int dest_nentries;
+
+	void (*send_cb)(struct ath10k_ce_pipe *);
+	void (*recv_cb)(struct ath10k_ce_pipe *);
 };
 
 #define SR_BA_ADDRESS		0x0000
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b87b986..aa9bd92 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -34,16 +34,19 @@
 static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
+static bool rawmode;
 
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
 module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
+module_param(rawmode, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
 MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
+MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -54,6 +57,7 @@
 		.has_shifted_cc_wraparound = true,
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
 			.fw = QCA988X_HW_2_0_FW_FILE,
@@ -70,6 +74,7 @@
 		.uart_pin = 6,
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
 			.fw = QCA6174_HW_2_1_FW_FILE,
@@ -86,6 +91,7 @@
 		.uart_pin = 6,
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
 		.fw = {
 			.dir = QCA6174_HW_3_0_FW_DIR,
 			.fw = QCA6174_HW_3_0_FW_FILE,
@@ -102,6 +108,7 @@
 		.uart_pin = 6,
 		.otp_exe_param = 0,
 		.channel_counters_freq_hz = 88000,
+		.max_probe_resp_desc_thres = 0,
 		.fw = {
 			/* uses same binaries as hw3.0 */
 			.dir = QCA6174_HW_3_0_FW_DIR,
@@ -120,6 +127,7 @@
 		.otp_exe_param = 0x00000700,
 		.continuous_frag_desc = true,
 		.channel_counters_freq_hz = 150000,
+		.max_probe_resp_desc_thres = 24,
 		.fw = {
 			.dir = QCA99X0_HW_2_0_FW_DIR,
 			.fw = QCA99X0_HW_2_0_FW_FILE,
@@ -129,6 +137,21 @@
 			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
 		},
 	},
+	{
+		.id = QCA9377_HW_1_0_DEV_VERSION,
+		.name = "qca9377 hw1.0",
+		.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0,
+		.fw = {
+			.dir = QCA9377_HW_1_0_FW_DIR,
+			.fw = QCA9377_HW_1_0_FW_FILE,
+			.otp = QCA9377_HW_1_0_OTP_FILE,
+			.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+			.board_size = QCA9377_BOARD_DATA_SZ,
+			.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+		},
+	},
 };
 
 static const char *const ath10k_core_fw_feature_str[] = {
@@ -142,12 +165,18 @@
 	[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
 	[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
 	[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+	[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
+	[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
 };
 
 static unsigned int ath10k_core_get_fw_feature_str(char *buf,
 						   size_t buf_len,
 						   enum ath10k_fw_features feat)
 {
+	/* make sure that ath10k_core_fw_feature_str[] gets updated */
+	BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) !=
+		     ATH10K_FW_FEATURE_COUNT);
+
 	if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
 	    WARN_ON(!ath10k_core_fw_feature_str[feat])) {
 		return scnprintf(buf, buf_len, "bit%d", feat);
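
[Editor's note] The BUILD_BUG_ON added above turns a stale string table into a compile failure instead of a runtime surprise. Outside the kernel the same guard can be written with C11 _Static_assert; a hypothetical miniature:

	#include <stdio.h>

	enum { FEAT_A, FEAT_B, FEAT_COUNT };

	static const char * const feat_str[] = {
		[FEAT_A] = "feat-a",
		[FEAT_B] = "feat-b",
	};

	/* fails to build if the table and the enum ever drift apart */
	_Static_assert(sizeof(feat_str) / sizeof(feat_str[0]) == FEAT_COUNT,
		       "feat_str[] out of sync with the feature enum");

	int main(void)
	{
		printf("%s %s\n", feat_str[FEAT_A], feat_str[FEAT_B]);
		return 0;
	}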
@@ -435,6 +464,56 @@
 	return ret;
 }
 
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+	u32 result, address;
+	u8 board_id, chip_id;
+	int ret;
+
+	address = ar->hw_params.patch_load_addr;
+
+	if (!ar->otp_data || !ar->otp_len) {
+		ath10k_warn(ar,
+			    "failed to retrieve board id because of invalid otp\n");
+		return -ENODATA;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot upload otp to 0x%x len %zd for board id\n",
+		   address, ar->otp_len);
+
+	ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+	if (ret) {
+		ath10k_err(ar, "could not write otp for board id check: %d\n",
+			   ret);
+		return ret;
+	}
+
+	ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
+				 &result);
+	if (ret) {
+		ath10k_err(ar, "could not execute otp for board id check: %d\n",
+			   ret);
+		return ret;
+	}
+
+	board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+	chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT,
+		   "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
+		   result, board_id, chip_id);
+
+	if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+		return -EOPNOTSUPP;
+
+	ar->id.bmi_ids_valid = true;
+	ar->id.bmi_board_id = board_id;
+	ar->id.bmi_chip_id = chip_id;
+
+	return 0;
+}
+
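
[Editor's note] The board and chip ids are packed into one 32-bit OTP result word and pulled out with the kernel's MS() mask/shift helper against the masks from bmi.h. A standalone sketch of the extraction (shortened, hypothetical macro names mirroring the ATH10K_BMI_* defines):

	#include <stdio.h>

	#define BOARD_ID_MASK	0x7c00
	#define BOARD_ID_LSB	10
	#define CHIP_ID_MASK	0x18000
	#define CHIP_ID_LSB	15
	#define STATUS_MASK	0xff

	/* same shape as the kernel's MS() mask/shift helper */
	#define MS(val, field)	(((val) & field##_MASK) >> field##_LSB)

	int main(void)
	{
		unsigned int result = (3u << CHIP_ID_LSB) |
				      (9u << BOARD_ID_LSB);

		if (result & STATUS_MASK) {	/* non-zero status byte */
			puts("board id not supported");
			return 1;
		}
		printf("board_id=%u chip_id=%u\n",
		       MS(result, BOARD_ID), MS(result, CHIP_ID));
		return 0;
	}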
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
 	u32 result, address = ar->hw_params.patch_load_addr;
@@ -473,8 +552,8 @@
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
 	if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
-				   ar->fw_features))
-	    && result != 0) {
+				   ar->fw_features)) &&
+	    result != 0) {
 		ath10k_err(ar, "otp calibration failed: %d", result);
 		return -EINVAL;
 	}
@@ -497,7 +576,7 @@
 		data_len = ar->firmware_len;
 		mode_name = "normal";
 		ret = ath10k_swap_code_seg_configure(ar,
-				ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+						     ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
 		if (ret) {
 			ath10k_err(ar, "failed to configure fw code swap: %d\n",
 				   ret);
@@ -505,8 +584,8 @@
 		}
 		break;
 	case ATH10K_FIRMWARE_MODE_UTF:
-		data = ar->testmode.utf->data;
-		data_len = ar->testmode.utf->size;
+		data = ar->testmode.utf_firmware_data;
+		data_len = ar->testmode.utf_firmware_len;
 		mode_name = "utf";
 		break;
 	default:
@@ -528,11 +607,18 @@
 	return ret;
 }
 
-static void ath10k_core_free_firmware_files(struct ath10k *ar)
+static void ath10k_core_free_board_files(struct ath10k *ar)
 {
 	if (!IS_ERR(ar->board))
 		release_firmware(ar->board);
 
+	ar->board = NULL;
+	ar->board_data = NULL;
+	ar->board_len = 0;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
 	if (!IS_ERR(ar->otp))
 		release_firmware(ar->otp);
 
@@ -544,10 +630,6 @@
 
 	ath10k_swap_code_seg_release(ar);
 
-	ar->board = NULL;
-	ar->board_data = NULL;
-	ar->board_len = 0;
-
 	ar->otp = NULL;
 	ar->otp_data = NULL;
 	ar->otp_len = 0;
@@ -557,7 +639,6 @@
 	ar->firmware_len = 0;
 
 	ar->cal_file = NULL;
-
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -579,25 +660,7 @@
 	return 0;
 }
 
-static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
-{
-	char filename[100];
-
-	scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
-		  ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
-
-	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
-	if (IS_ERR(ar->board))
-		return PTR_ERR(ar->board);
-
-	ar->board_data = ar->board->data;
-	ar->board_len = ar->board->size;
-	ar->spec_board_loaded = true;
-
-	return 0;
-}
-
-static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
 {
 	if (!ar->hw_params.fw.board) {
 		ath10k_err(ar, "failed to find board file fw entry\n");
@@ -612,35 +675,236 @@
 
 	ar->board_data = ar->board->data;
 	ar->board_len = ar->board->size;
-	ar->spec_board_loaded = false;
+
+	return 0;
+}
+
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+					 const void *buf, size_t buf_len,
+					 const char *boardname)
+{
+	const struct ath10k_fw_ie *hdr;
+	bool name_match_found;
+	int ret, board_ie_id;
+	size_t board_ie_len;
+	const void *board_ie_data;
+
+	name_match_found = false;
+
+	/* go through ATH10K_BD_IE_BOARD_ elements */
+	while (buf_len > sizeof(struct ath10k_fw_ie)) {
+		hdr = buf;
+		board_ie_id = le32_to_cpu(hdr->id);
+		board_ie_len = le32_to_cpu(hdr->len);
+		board_ie_data = hdr->data;
+
+		buf_len -= sizeof(*hdr);
+		buf += sizeof(*hdr);
+
+		if (buf_len < ALIGN(board_ie_len, 4)) {
+			ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+				   buf_len, ALIGN(board_ie_len, 4));
+			ret = -EINVAL;
+			goto out;
+		}
+
+		switch (board_ie_id) {
+		case ATH10K_BD_IE_BOARD_NAME:
+			ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+					board_ie_data, board_ie_len);
+
+			if (board_ie_len != strlen(boardname))
+				break;
+
+			ret = memcmp(board_ie_data, boardname, strlen(boardname));
+			if (ret)
+				break;
+
+			name_match_found = true;
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "boot found match for name '%s'",
+				   boardname);
+			break;
+		case ATH10K_BD_IE_BOARD_DATA:
+			if (!name_match_found)
+				/* no match found */
+				break;
+
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "boot found board data for '%s'",
+				   boardname);
+
+			ar->board_data = board_ie_data;
+			ar->board_len = board_ie_len;
+
+			ret = 0;
+			goto out;
+		default:
+			ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+				    board_ie_id);
+			break;
+		}
+
+		/* jump over the padding */
+		board_ie_len = ALIGN(board_ie_len, 4);
+
+		buf_len -= board_ie_len;
+		buf += board_ie_len;
+	}
+
+	/* no match found */
+	ret = -ENOENT;
+
+out:
+	return ret;
+}
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+					      const char *boardname,
+					      const char *filename)
+{
+	size_t len, magic_len, ie_len;
+	struct ath10k_fw_ie *hdr;
+	const u8 *data;
+	int ret, ie_id;
+
+	ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+	if (IS_ERR(ar->board))
+		return PTR_ERR(ar->board);
+
+	data = ar->board->data;
+	len = ar->board->size;
+
+	/* magic has extra null byte padded */
+	magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+	if (len < magic_len) {
+		ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+			   ar->hw_params.fw.dir, filename, len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+		ath10k_err(ar, "found invalid board magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* magic is padded to 4 bytes */
+	magic_len = ALIGN(magic_len, 4);
+	if (len < magic_len) {
+		ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+			   ar->hw_params.fw.dir, filename, len);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	data += magic_len;
+	len -= magic_len;
+
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data = hdr->data;
+
+		if (len < ALIGN(ie_len, 4)) {
+			ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+				   ie_id, ie_len, len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_BD_IE_BOARD:
+			ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+							    boardname);
+			if (ret == -ENOENT)
+				/* no match found, continue */
+				break;
+			else if (ret)
+				/* there was an error, bail out */
+				goto err;
+
+			/* board data found */
+			goto out;
+		}
+
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+out:
+	if (!ar->board_data || !ar->board_len) {
+		ath10k_err(ar,
+			   "failed to fetch board data for %s from %s/%s\n",
+			   boardname, ar->hw_params.fw.dir, filename);
+		ret = -ENODATA;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ath10k_core_free_board_files(ar);
+	return ret;
+}
+
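
[Editor's note] Both ath10k_core_parse_bd_ie_board() and the loop above walk the same on-disk layout: a 4-byte id, a 4-byte length, then a payload padded to a 4-byte boundary. A self-contained sketch of that TLV walk (host endianness assumed; the real code uses le32_to_cpu()):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* hypothetical header shaped like ath10k_fw_ie; payload follows */
	struct fw_ie {
		uint32_t id;
		uint32_t len;
	};

	#define ALIGN4(x)	(((x) + 3u) & ~3u)

	static void walk(const uint8_t *buf, size_t len)
	{
		struct fw_ie hdr;

		while (len > sizeof(hdr)) {
			uint32_t ie_len;

			memcpy(&hdr, buf, sizeof(hdr)); /* no unaligned read */
			ie_len = hdr.len;

			len -= sizeof(hdr);
			buf += sizeof(hdr);
			if (len < ALIGN4(ie_len)) {
				puts("truncated IE, stop");
				return;
			}
			printf("ie id=%u len=%u\n",
			       (unsigned)hdr.id, (unsigned)ie_len);

			ie_len = ALIGN4(ie_len); /* skip payload plus pad */
			len -= ie_len;
			buf += ie_len;
		}
	}

	int main(void)
	{
		/* id=1 len=5 "board"+pad, then id=2 len=4 */
		static const uint8_t blob[] = {
			1,0,0,0, 5,0,0,0, 'b','o','a','r','d',0,0,0,
			2,0,0,0, 4,0,0,0, 0xaa,0xbb,0xcc,0xdd,
		};
		walk(blob, sizeof(blob));
		return 0;
	}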
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+					 size_t name_len)
+{
+	if (ar->id.bmi_ids_valid) {
+		scnprintf(name, name_len,
+			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+			  ath10k_bus_str(ar->hif.bus),
+			  ar->id.bmi_chip_id,
+			  ar->id.bmi_board_id);
+		goto out;
+	}
+
+	scnprintf(name, name_len,
+		  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+		  ath10k_bus_str(ar->hif.bus),
+		  ar->id.vendor, ar->id.device,
+		  ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+out:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
 
 	return 0;
 }
 
 static int ath10k_core_fetch_board_file(struct ath10k *ar)
 {
+	char boardname[100];
 	int ret;
 
-	if (strlen(ar->spec_board_id) > 0) {
-		ret = ath10k_core_fetch_spec_board_file(ar);
-		if (ret) {
-			ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
-				    ret);
-			goto generic;
-		}
-
-		ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
-			   ar->spec_board_id);
-		return 0;
-	}
-
-generic:
-	ret = ath10k_core_fetch_generic_board_file(ar);
+	ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname));
 	if (ret) {
-		ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+		ath10k_err(ar, "failed to create board name: %d", ret);
 		return ret;
 	}
 
+	ar->bd_api = 2;
+	ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+						 ATH10K_BOARD_API2_FILE);
+	if (!ret)
+		goto success;
+
+	ar->bd_api = 1;
+	ret = ath10k_core_fetch_board_data_api_1(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch board data\n");
+		return ret;
+	}
+
+success:
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
 	return 0;
 }
 
@@ -872,12 +1136,6 @@
 	/* calibration file is optional, don't check for any errors */
 	ath10k_fetch_cal_file(ar);
 
-	ret = ath10k_core_fetch_board_file(ar);
-	if (ret) {
-		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
-		return ret;
-	}
-
 	ar->fw_api = 5;
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
@@ -1117,6 +1375,15 @@
 	ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
 	ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
 
+	if (rawmode) {
+		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+			      ar->fw_features)) {
+			ath10k_err(ar, "rawmode = 1 requires support from firmware");
+			return -EINVAL;
+		}
+		set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+	}
+
 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
 		ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
 
@@ -1241,10 +1508,10 @@
 		goto err;
 
 	/* Some of the qca988x solutions have a global reset issue
-         * during target initialization. Bypassing PLL setting before
-         * downloading firmware and letting the SoC run on REF_CLK is
-         * fixing the problem. Corresponding firmware change is also needed
-         * to set the clock source once the target is initialized.
+	 * during target initialization. Bypassing PLL setting before
+	 * downloading firmware and letting the SoC run on REF_CLK is
+	 * fixing the problem. Corresponding firmware change is also needed
+	 * to set the clock source once the target is initialized.
 	 */
 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
 		     ar->fw_features)) {
@@ -1478,6 +1745,19 @@
 		goto err_power_down;
 	}
 
+	ret = ath10k_core_get_board_id_from_otp(ar);
+	if (ret && ret != -EOPNOTSUPP) {
+		ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+			   ret);
+		return ret;
+	}
+
+	ret = ath10k_core_fetch_board_file(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+		goto err_free_firmware_files;
+	}
+
 	ret = ath10k_core_init_firmware_features(ar);
 	if (ret) {
 		ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -1605,6 +1885,7 @@
 	ath10k_testmode_destroy(ar);
 
 	ath10k_core_free_firmware_files(ar);
+	ath10k_core_free_board_files(ar);
 
 	ath10k_debug_unregister(ar);
 }
@@ -1635,6 +1916,7 @@
 		ar->hw_values = &qca988x_values;
 		break;
 	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
 		ar->regs = &qca6174_regs;
 		ar->hw_values = &qca6174_values;
 		break;
@@ -1714,6 +1996,7 @@
 	destroy_workqueue(ar->workqueue_aux);
 
 	ath10k_debug_destroy(ar);
+	ath10k_wmi_free_host_mem(ar);
 	ath10k_mac_destroy(ar);
 }
 EXPORT_SYMBOL(ath10k_core_destroy);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 1254214..4a23015 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -214,6 +214,7 @@
 	s32 hw_queued;
 	s32 hw_reaped;
 	s32 underrun;
+	u32 hw_paused;
 	s32 tx_abort;
 	s32 mpdus_requed;
 	u32 tx_ko;
@@ -226,6 +227,16 @@
 	u32 pdev_resets;
 	u32 phy_underrun;
 	u32 txop_ovf;
+	u32 seq_posted;
+	u32 seq_failed_queueing;
+	u32 seq_completed;
+	u32 seq_restarted;
+	u32 mu_seq_posted;
+	u32 mpdus_sw_flush;
+	u32 mpdus_hw_filter;
+	u32 mpdus_truncated;
+	u32 mpdus_ack_failed;
+	u32 mpdus_expired;
 
 	/* PDEV RX stats */
 	s32 mid_ppdu_route_change;
@@ -242,6 +253,7 @@
 	s32 phy_errs;
 	s32 phy_err_drop;
 	s32 mpdu_errs;
+	s32 rx_ovfl_errs;
 };
 
 struct ath10k_fw_stats {
@@ -250,6 +262,30 @@
 	struct list_head peers;
 };
 
+#define ATH10K_TPC_TABLE_TYPE_FLAG	1
+#define ATH10K_TPC_PREAM_TABLE_END	0xFFFF
+
+struct ath10k_tpc_table {
+	u32 pream_idx[WMI_TPC_RATE_MAX];
+	u8 rate_code[WMI_TPC_RATE_MAX];
+	char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+	u32 reg_domain;
+	u32 chan_freq;
+	u32 phy_mode;
+	u32 twice_antenna_reduction;
+	u32 twice_max_rd_power;
+	s32 twice_antenna_gain;
+	u32 power_limit;
+	u32 num_tx_chain;
+	u32 ctl;
+	u32 rate_max;
+	u8 flag[WMI_TPC_FLAG];
+	struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
 struct ath10k_dfs_stats {
 	u32 phy_errors;
 	u32 pulses_total;
@@ -378,6 +414,11 @@
 	struct ath10k_dfs_stats dfs_stats;
 	struct ath_dfs_pool_stats dfs_pool_stats;
 
+	/* used for tpc-dump storage, protected by data-lock */
+	struct ath10k_tpc_stats *tpc_stats;
+
+	struct completion tpc_complete;
+
 	/* protected by conf_mutex */
 	u32 fw_dbglog_mask;
 	u32 fw_dbglog_level;
@@ -468,6 +509,9 @@
 	 */
 	ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
 
+	/* Firmware supports adaptive CCA */
+	ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -612,6 +656,11 @@
 
 		u32 channel_counters_freq_hz;
 
+		/* Mgmt tx descriptors threshold for limiting probe response
+		 * frames.
+		 */
+		u32 max_probe_resp_desc_thres;
+
 		struct ath10k_hw_params_fw {
 			const char *dir;
 			const char *fw;
@@ -642,10 +691,19 @@
 		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
 	} swap;
 
-	char spec_board_id[100];
-	bool spec_board_loaded;
+	struct {
+		u32 vendor;
+		u32 device;
+		u32 subsystem_vendor;
+		u32 subsystem_device;
+
+		bool bmi_ids_valid;
+		u8 bmi_board_id;
+		u8 bmi_chip_id;
+	} id;
 
 	int fw_api;
+	int bd_api;
 	enum ath10k_cal_mode cal_mode;
 
 	struct {
@@ -687,8 +745,6 @@
 	int num_started_vdevs;
 
 	/* Protected by conf-mutex */
-	u8 supp_tx_chainmask;
-	u8 supp_rx_chainmask;
 	u8 cfg_tx_chainmask;
 	u8 cfg_rx_chainmask;
 
@@ -771,9 +827,12 @@
 	struct {
 		/* protected by conf_mutex */
 		const struct firmware *utf;
+		char utf_version[32];
+		const void *utf_firmware_data;
+		size_t utf_firmware_len;
 		DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
 		enum ath10k_fw_wmi_op_version orig_wmi_op_version;
-
+		enum ath10k_fw_wmi_op_version op_version;
 		/* protected by data_lock */
 		bool utf_monitor;
 	} testmode;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index bf033f4..6cc1aa3 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -125,19 +125,25 @@
 void ath10k_print_driver_info(struct ath10k *ar)
 {
 	char fw_features[128] = {};
+	char boardinfo[100];
 
 	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
 
-	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+	if (ar->id.bmi_ids_valid)
+		scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
+			  ar->id.bmi_chip_id, ar->id.bmi_board_id);
+	else
+		scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
+			  ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+	ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
 		    ar->hw_params.name,
 		    ar->target_version,
 		    ar->chip_id,
-		    (strlen(ar->spec_board_id) > 0 ? ", " : ""),
-		    ar->spec_board_id,
-		    (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
-		     ? " fallback" : ""),
+		    boardinfo,
 		    ar->hw->wiphy->fw_version,
 		    ar->fw_api,
+		    ar->bd_api,
 		    ar->htt.target_version_major,
 		    ar->htt.target_version_minor,
 		    ar->wmi.op_version,
@@ -285,28 +291,6 @@
 	spin_unlock_bh(&ar->data_lock);
 }
 
-static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
-{
-	struct ath10k_fw_stats_peer *i;
-	size_t num = 0;
-
-	list_for_each_entry(i, head, list)
-		++num;
-
-	return num;
-}
-
-static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
-{
-	struct ath10k_fw_stats_vdev *i;
-	size_t num = 0;
-
-	list_for_each_entry(i, head, list)
-		++num;
-
-	return num;
-}
-
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_fw_stats stats = {};
@@ -343,8 +327,8 @@
 		goto free;
 	}
 
-	num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
-	num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+	num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
 	is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
 		    !list_empty(&stats.pdevs));
 	is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
@@ -429,240 +413,6 @@
 	return 0;
 }
 
-/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
-
-static void ath10k_fw_stats_fill(struct ath10k *ar,
-				 struct ath10k_fw_stats *fw_stats,
-				 char *buf)
-{
-	unsigned int len = 0;
-	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
-	const struct ath10k_fw_stats_pdev *pdev;
-	const struct ath10k_fw_stats_vdev *vdev;
-	const struct ath10k_fw_stats_peer *peer;
-	size_t num_peers;
-	size_t num_vdevs;
-	int i;
-
-	spin_lock_bh(&ar->data_lock);
-
-	pdev = list_first_entry_or_null(&fw_stats->pdevs,
-					struct ath10k_fw_stats_pdev, list);
-	if (!pdev) {
-		ath10k_warn(ar, "failed to get pdev stats\n");
-		goto unlock;
-	}
-
-	num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
-	num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
-
-	len += scnprintf(buf + len, buf_len - len, "\n");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n",
-			 "ath10k PDEV stats");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-				 "=================");
-
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Channel noise floor", pdev->ch_noise_floor);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "Channel TX power", pdev->chan_tx_power);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "TX frame count", pdev->tx_frame_count);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "RX frame count", pdev->rx_frame_count);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "RX clear count", pdev->rx_clear_count);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "Cycle count", pdev->cycle_count);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "PHY error count", pdev->phy_err_count);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "RTS bad count", pdev->rts_bad);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "RTS good count", pdev->rts_good);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "FCS bad count", pdev->fcs_bad);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "No beacon count", pdev->no_beacons);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
-			 "MIB int count", pdev->mib_int_count);
-
-	len += scnprintf(buf + len, buf_len - len, "\n");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n",
-			 "ath10k PDEV TX stats");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-				 "=================");
-
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "HTT cookies queued", pdev->comp_queued);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "HTT cookies disp.", pdev->comp_delivered);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MSDU queued", pdev->msdu_enqued);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDU queued", pdev->mpdu_enqued);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MSDUs dropped", pdev->wmm_drop);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Local enqued", pdev->local_enqued);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Local freed", pdev->local_freed);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "HW queued", pdev->hw_queued);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PPDUs reaped", pdev->hw_reaped);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Num underruns", pdev->underrun);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PPDUs cleaned", pdev->tx_abort);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDUs requed", pdev->mpdus_requed);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Excessive retries", pdev->tx_ko);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "HW rate", pdev->data_rc);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Sched self tiggers", pdev->self_triggers);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Dropped due to SW retries",
-			 pdev->sw_retry_failure);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Illegal rate phy errors",
-			 pdev->illgl_rate_phy_err);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Pdev continous xretry", pdev->pdev_cont_xretry);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "TX timeout", pdev->pdev_tx_timeout);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PDEV resets", pdev->pdev_resets);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PHY underrun", pdev->phy_underrun);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDU is more than txop limit", pdev->txop_ovf);
-
-	len += scnprintf(buf + len, buf_len - len, "\n");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n",
-			 "ath10k PDEV RX stats");
-	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-				 "=================");
-
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Mid PPDU route change",
-			 pdev->mid_ppdu_route_change);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Tot. number of statuses", pdev->status_rcvd);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Extra frags on rings 0", pdev->r0_frags);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Extra frags on rings 1", pdev->r1_frags);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Extra frags on rings 2", pdev->r2_frags);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Extra frags on rings 3", pdev->r3_frags);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MSDUs delivered to HTT", pdev->htt_msdus);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDUs delivered to HTT", pdev->htt_mpdus);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MSDUs delivered to stack", pdev->loc_msdus);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDUs delivered to stack", pdev->loc_mpdus);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "Oversized AMSUs", pdev->oversize_amsdu);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PHY errors", pdev->phy_errs);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "PHY errors drops", pdev->phy_err_drop);
-	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
-			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
-
-	len += scnprintf(buf + len, buf_len - len, "\n");
-	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
-			 "ath10k VDEV stats", num_vdevs);
-	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-				 "=================");
-
-	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "vdev id", vdev->vdev_id);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "beacon snr", vdev->beacon_snr);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "data snr", vdev->data_snr);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num rx frames", vdev->num_rx_frames);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num rts fail", vdev->num_rts_fail);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num rts success", vdev->num_rts_success);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num rx err", vdev->num_rx_err);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num rx discard", vdev->num_rx_discard);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "num tx not acked", vdev->num_tx_not_acked);
-
-		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
-			len += scnprintf(buf + len, buf_len - len,
-					"%25s [%02d] %u\n",
-					 "num tx frames", i,
-					 vdev->num_tx_frames[i]);
-
-		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
-			len += scnprintf(buf + len, buf_len - len,
-					"%25s [%02d] %u\n",
-					 "num tx frames retries", i,
-					 vdev->num_tx_frames_retries[i]);
-
-		for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
-			len += scnprintf(buf + len, buf_len - len,
-					"%25s [%02d] %u\n",
-					 "num tx frames failures", i,
-					 vdev->num_tx_frames_failures[i]);
-
-		for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
-			len += scnprintf(buf + len, buf_len - len,
-					"%25s [%02d] 0x%08x\n",
-					 "tx rate history", i,
-					 vdev->tx_rate_history[i]);
-
-		for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
-			len += scnprintf(buf + len, buf_len - len,
-					"%25s [%02d] %u\n",
-					 "beacon rssi history", i,
-					 vdev->beacon_rssi_history[i]);
-
-		len += scnprintf(buf + len, buf_len - len, "\n");
-	}
-
-	len += scnprintf(buf + len, buf_len - len, "\n");
-	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
-			 "ath10k PEER stats", num_peers);
-	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
-				 "=================");
-
-	list_for_each_entry(peer, &fw_stats->peers, list) {
-		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
-				 "Peer MAC address", peer->peer_macaddr);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "Peer RSSI", peer->peer_rssi);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "Peer TX rate", peer->peer_tx_rate);
-		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
-				 "Peer RX rate", peer->peer_rx_rate);
-		len += scnprintf(buf + len, buf_len - len, "\n");
-	}
-
-unlock:
-	spin_unlock_bh(&ar->data_lock);
-
-	if (len >= buf_len)
-		buf[len - 1] = 0;
-	else
-		buf[len] = 0;
-}
-
 static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
 {
 	struct ath10k *ar = inode->i_private;
@@ -688,7 +438,12 @@
 		goto err_free;
 	}
 
-	ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+	ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+	if (ret) {
+		ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+		goto err_free;
+	}
+
 	file->private_data = buf;
 
 	mutex_unlock(&ar->conf_mutex);
@@ -1843,6 +1598,233 @@
 	.llseek = default_llseek,
 };
 
+#define ATH10K_TPC_CONFIG_BUF_SIZE	(1024 * 1024)
+
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+	int ret;
+	unsigned long time_left;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	reinit_completion(&ar->debug.tpc_complete);
+
+	ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+	if (ret) {
+		ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+						1 * HZ);
+	if (time_left == 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
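
The helper above re-arms a completion before issuing the WMI command, then blocks with a one-second timeout until the event path (ath10k_debug_tpc_stats_process() below) calls complete(). A minimal, self-contained sketch of the same fire-and-wait pattern; my_dev and my_send_request() are illustrative stand-ins, not ath10k API:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_dev {
	struct completion done;
};

/* Stand-in for the async command; the real reply arrives via an
 * event handler that calls complete(&dev->done).
 */
static int my_send_request(struct my_dev *dev)
{
	return 0;
}

static int my_request_sync(struct my_dev *dev)
{
	unsigned long time_left;
	int ret;

	/* Re-arm before sending so an early reply cannot be missed. */
	reinit_completion(&dev->done);

	ret = my_send_request(dev);
	if (ret)
		return ret;

	time_left = wait_for_completion_timeout(&dev->done, 1 * HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}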
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+				    struct ath10k_tpc_stats *tpc_stats)
+{
+	spin_lock_bh(&ar->data_lock);
+
+	kfree(ar->debug.tpc_stats);
+	ar->debug.tpc_stats = tpc_stats;
+	complete(&ar->debug.tpc_complete);
+
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+				   unsigned int j, char *buf, unsigned int *len)
+{
+	unsigned int i, buf_len;
+	static const char table_str[][5] = { "CDD",
+					     "STBC",
+					     "TXBF" };
+	static const char pream_str[][6] = { "CCK",
+					     "OFDM",
+					     "HT20",
+					     "HT40",
+					     "VHT20",
+					     "VHT40",
+					     "VHT80",
+					     "HTCUP" };
+
+	buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "********************************\n");
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "******************* %s POWER TABLE ****************\n",
+			  table_str[j]);
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "********************************\n");
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "No.  Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n");
+
+	for (i = 0; i < tpc_stats->rate_max; i++) {
+		*len += scnprintf(buf + *len, buf_len - *len,
+				  "%8d %s 0x%2x %s\n", i,
+				  pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+				  tpc_stats->tpc_table[j].rate_code[i],
+				  tpc_stats->tpc_table[j].tpc_value[i]);
+	}
+
+	*len += scnprintf(buf + *len, buf_len - *len,
+			  "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+				  struct ath10k_tpc_stats *tpc_stats,
+				  char *buf)
+{
+	unsigned int len, j, buf_len;
+
+	len = 0;
+	buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+	spin_lock_bh(&ar->data_lock);
+
+	if (!tpc_stats) {
+		ath10k_warn(ar, "failed to get tpc stats\n");
+		goto unlock;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************************************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "TPC config for channel %4d mode %d\n",
+			 tpc_stats->chan_freq,
+			 tpc_stats->phy_mode);
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************************************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "CTL		=  0x%2x Reg. Domain		= %2d\n",
+			 tpc_stats->ctl,
+			 tpc_stats->reg_domain);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Antenna Gain	= %2d Reg. Max Antenna Gain	=  %2d\n",
+			 tpc_stats->twice_antenna_gain,
+			 tpc_stats->twice_antenna_reduction);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Power Limit	= %2d Reg. Max Power		= %2d\n",
+			 tpc_stats->power_limit,
+			 tpc_stats->twice_max_rd_power / 2);
+	len += scnprintf(buf + len, buf_len - len,
+			 "Num tx chains	= %2d Num supported rates	= %2d\n",
+			 tpc_stats->num_tx_chain,
+			 tpc_stats->rate_max);
+
+	for (j = 0; j < tpc_stats->num_tx_chain ; j++) {
+		switch (j) {
+		case WMI_TPC_TABLE_TYPE_CDD:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "CDD not supported\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		case WMI_TPC_TABLE_TYPE_STBC:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "STBC not supported\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		case WMI_TPC_TABLE_TYPE_TXBF:
+			if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+				len += scnprintf(buf + len, buf_len - len,
+						 "TXBF not supported\n***************************\n");
+				break;
+			}
+
+			ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+			break;
+		default:
+			len += scnprintf(buf + len, buf_len - len,
+					 "Invalid Type\n");
+			break;
+		}
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
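
This fill helper, like the fw-stats ones, relies on scnprintf() returning the number of characters actually stored (never more than the space it was given), so `len` can be advanced blindly without overflowing the buffer; the clamp at the end merely guarantees NUL termination. A reduced sketch of the idiom, with my_fill() as an illustrative name:

#include <linux/kernel.h>

static void my_fill(char *buf, unsigned int buf_len)
{
	unsigned int len = 0;
	int i;

	/* Each call writes at most buf_len - len - 1 characters and
	 * returns what it actually stored, so len stays in bounds.
	 */
	for (i = 0; i < 4; i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%30s %10d\n", "example counter", i);

	if (len >= buf_len)
		buf[len - 1] = 0;	/* defensive: output truncated */
	else
		buf[len] = 0;
}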
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+	struct ath10k *ar = inode->i_private;
+	void *buf = NULL;
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON) {
+		ret = -ENETDOWN;
+		goto err_unlock;
+	}
+
+	buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	ret = ath10k_debug_tpc_stats_request(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+			    ret);
+		goto err_free;
+	}
+
+	ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+	file->private_data = buf;
+
+	mutex_unlock(&ar->conf_mutex);
+	return 0;
+
+err_free:
+	vfree(buf);
+
+err_unlock:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+	vfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	unsigned int len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+	.open = ath10k_tpc_stats_open,
+	.release = ath10k_tpc_stats_release,
+	.read = ath10k_tpc_stats_read,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
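
fops_tpc_stats follows the common debugfs pattern: build the whole report in open(), park it in file->private_data, serve reads out of it with simple_read_from_buffer(), and free it in release(). A skeleton of that lifecycle, assuming hypothetical my_* names:

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define MY_BUF_SIZE (64 * 1024)

static int my_stats_open(struct inode *inode, struct file *file)
{
	char *buf;

	buf = vmalloc(MY_BUF_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Fill once; subsequent reads only copy out of this buffer. */
	scnprintf(buf, MY_BUF_SIZE, "%30s %10d\n", "example stat", 42);

	file->private_data = buf;
	return 0;
}

static ssize_t my_stats_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	const char *buf = file->private_data;

	return simple_read_from_buffer(user_buf, count, ppos,
				       buf, strlen(buf));
}

static int my_stats_release(struct inode *inode, struct file *file)
{
	vfree(file->private_data);
	return 0;
}

static const struct file_operations my_stats_fops = {
	.open = my_stats_open,
	.read = my_stats_read,
	.release = my_stats_release,
	.owner = THIS_MODULE,
	.llseek = default_llseek,
};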
 int ath10k_debug_start(struct ath10k *ar)
 {
 	int ret;
@@ -2111,6 +2093,8 @@
 	ar->debug.fw_crash_data = NULL;
 
 	ath10k_debug_fw_stats_reset(ar);
+
+	kfree(ar->debug.tpc_stats);
 }
 
 int ath10k_debug_register(struct ath10k *ar)
@@ -2127,6 +2111,7 @@
 	INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
 			  ath10k_debug_htt_stats_dwork);
 
+	init_completion(&ar->debug.tpc_complete);
 	init_completion(&ar->debug.fw_stats_complete);
 
 	debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -2195,6 +2180,9 @@
 	debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
 			    ar->debug.debugfs_phy, ar, &fops_quiet_period);
 
+	debugfs_create_file("tpc_stats", S_IRUSR,
+			    ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+
 	return 0;
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 53bd6a1..7de780c 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -55,6 +55,9 @@
 	ATH10K_DBG_AGGR_MODE_MAX,
 };
 
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+
 extern unsigned int ath10k_debug_mask;
 
 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -70,6 +73,8 @@
 int ath10k_debug_register(struct ath10k *ar);
 void ath10k_debug_unregister(struct ath10k *ar);
 void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+				    struct ath10k_tpc_stats *tpc_stats);
 struct ath10k_fw_crash_data *
 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
 
@@ -117,6 +122,12 @@
 {
 }
 
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+						  struct ath10k_tpc_stats *tpc_stats)
+{
+	kfree(tpc_stats);
+}
+
 static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
 					   int len)
 {
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 0c92e02..89e7076 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -30,13 +30,6 @@
 	u16 len;
 };
 
-struct ath10k_hif_cb {
-	int (*tx_completion)(struct ath10k *ar,
-			     struct sk_buff *wbuf);
-	int (*rx_completion)(struct ath10k *ar,
-			     struct sk_buff *wbuf);
-};
-
 struct ath10k_hif_ops {
 	/* send a scatter-gather list to the target */
 	int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
@@ -65,8 +58,7 @@
 	void (*stop)(struct ath10k *ar);
 
 	int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
-				   u8 *ul_pipe, u8 *dl_pipe,
-				   int *ul_is_polled, int *dl_is_polled);
+				   u8 *ul_pipe, u8 *dl_pipe);
 
 	void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
 
@@ -80,9 +72,6 @@
 	 */
 	void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
 
-	void (*set_callbacks)(struct ath10k *ar,
-			      struct ath10k_hif_cb *callbacks);
-
 	u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
 
 	u32 (*read32)(struct ath10k *ar, u32 address);
@@ -142,13 +131,10 @@
 
 static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
 						 u16 service_id,
-						 u8 *ul_pipe, u8 *dl_pipe,
-						 int *ul_is_polled,
-						 int *dl_is_polled)
+						 u8 *ul_pipe, u8 *dl_pipe)
 {
 	return ar->hif.ops->map_service_to_pipe(ar, service_id,
-						ul_pipe, dl_pipe,
-						ul_is_polled, dl_is_polled);
+						ul_pipe, dl_pipe);
 }
 
 static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
@@ -163,12 +149,6 @@
 	ar->hif.ops->send_complete_check(ar, pipe_id, force);
 }
 
-static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
-					    struct ath10k_hif_cb *callbacks)
-{
-	ar->hif.ops->set_callbacks(ar, callbacks);
-}
-
 static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
 						   u8 pipe_id)
 {
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 32d9ff1..5b3c6bc 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -23,16 +23,6 @@
 /* Send */
 /********/
 
-static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
-						  int force)
-{
-	/*
-	 * Check whether HIF has any prior sends that have finished,
-	 * have not had the post-processing done.
-	 */
-	ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
-}
-
 static void ath10k_htc_control_tx_complete(struct ath10k *ar,
 					   struct sk_buff *skb)
 {
@@ -181,24 +171,22 @@
 	return ret;
 }
 
-static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
-					    struct sk_buff *skb)
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htc *htc = &ar->htc;
 	struct ath10k_skb_cb *skb_cb;
 	struct ath10k_htc_ep *ep;
 
 	if (WARN_ON_ONCE(!skb))
-		return 0;
+		return;
 
 	skb_cb = ATH10K_SKB_CB(skb);
 	ep = &htc->endpoint[skb_cb->eid];
 
 	ath10k_htc_notify_tx_completion(ep, skb);
 	/* the skb now belongs to the completion handler */
-
-	return 0;
 }
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
 
 /***********/
 /* Receive */
@@ -304,8 +292,7 @@
 	return status;
 }
 
-static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
-					    struct sk_buff *skb)
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
 {
 	int status = 0;
 	struct ath10k_htc *htc = &ar->htc;
@@ -326,21 +313,11 @@
 		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
 		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
 				hdr, sizeof(*hdr));
-		status = -EINVAL;
 		goto out;
 	}
 
 	ep = &htc->endpoint[eid];
 
-	/*
-	 * If this endpoint that received a message from the target has
-	 * a to-target HIF pipe whose send completions are polled rather
-	 * than interrupt-driven, this is a good point to ask HIF to check
-	 * whether it has any completed sends to handle.
-	 */
-	if (ep->ul_is_polled)
-		ath10k_htc_send_complete_check(ep, 1);
-
 	payload_len = __le16_to_cpu(hdr->len);
 
 	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
@@ -348,7 +325,6 @@
 			    payload_len + sizeof(*hdr));
 		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
 				hdr, sizeof(*hdr));
-		status = -EINVAL;
 		goto out;
 	}
 
@@ -358,7 +334,6 @@
 			   skb->len, payload_len);
 		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
 				"", hdr, sizeof(*hdr));
-		status = -EINVAL;
 		goto out;
 	}
 
@@ -374,7 +349,6 @@
 		    (trailer_len > payload_len)) {
 			ath10k_warn(ar, "Invalid trailer length: %d\n",
 				    trailer_len);
-			status = -EPROTO;
 			goto out;
 		}
 
@@ -407,7 +381,6 @@
 				 * sending unsolicited messages on the ep 0
 				 */
 				ath10k_warn(ar, "HTC rx ctrl still processing\n");
-				status = -EINVAL;
 				complete(&htc->ctl_resp);
 				goto out;
 			}
@@ -439,9 +412,8 @@
 	skb = NULL;
 out:
 	kfree_skb(skb);
-
-	return status;
 }
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
 
 static void ath10k_htc_control_rx_complete(struct ath10k *ar,
 					   struct sk_buff *skb)
@@ -767,9 +739,7 @@
 	status = ath10k_hif_map_service_to_pipe(htc->ar,
 						ep->service_id,
 						&ep->ul_pipe_id,
-						&ep->dl_pipe_id,
-						&ep->ul_is_polled,
-						&ep->dl_is_polled);
+						&ep->dl_pipe_id);
 	if (status)
 		return status;
 
@@ -778,10 +748,6 @@
 		   htc_service_name(ep->service_id), ep->ul_pipe_id,
 		   ep->dl_pipe_id, ep->eid);
 
-	ath10k_dbg(ar, ATH10K_DBG_BOOT,
-		   "boot htc ep %d ul polled %d dl polled %d\n",
-		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);
-
 	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
 		ep->tx_credit_flow_enabled = false;
 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
@@ -841,7 +807,6 @@
 /* registered target arrival callback from the HIF layer */
 int ath10k_htc_init(struct ath10k *ar)
 {
-	struct ath10k_hif_cb htc_callbacks;
 	struct ath10k_htc_ep *ep = NULL;
 	struct ath10k_htc *htc = &ar->htc;
 
@@ -849,15 +814,11 @@
 
 	ath10k_htc_reset_endpoint_states(htc);
 
-	/* setup HIF layer callbacks */
-	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
-	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
 	htc->ar = ar;
 
 	/* Get HIF default pipe for HTC message exchange */
 	ep = &htc->endpoint[ATH10K_HTC_EP_0];
 
-	ath10k_hif_set_callbacks(ar, &htc_callbacks);
 	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
 
 	init_completion(&htc->ctl_resp);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index 527179c..e70aa38 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -312,8 +312,6 @@
 	int max_ep_message_len;
 	u8 ul_pipe_id;
 	u8 dl_pipe_id;
-	int ul_is_polled; /* call HIF to get tx completions */
-	int dl_is_polled; /* call HIF to fetch rx (not implemented) */
 
 	u8 seq_no; /* for debugging */
 	int tx_credits;
@@ -355,5 +353,7 @@
 int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
 		    struct sk_buff *packet);
 struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
 
 #endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 5731875..2bad50e 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1485,9 +1485,9 @@
 	spinlock_t tx_lock;
 	int max_num_pending_tx;
 	int num_pending_tx;
+	int num_pending_mgmt_tx;
 	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
-	struct dma_pool *tx_pool;
 
 	/* set if host-fw communication goes haywire
 	 * used to avoid further failures */
@@ -1508,6 +1508,11 @@
 		dma_addr_t paddr;
 		struct htt_msdu_ext_desc *vaddr;
 	} frag_desc;
+
+	struct {
+		dma_addr_t paddr;
+		struct ath10k_htt_txbuf *vaddr;
+	} txbuf;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1586,8 +1591,9 @@
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_ampdu,
 				u8 max_subfrms_amsdu);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1b7a043..6060dda 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -643,6 +643,8 @@
 	__be16 len;
 } __packed;
 
+#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
+
 static void ath10k_htt_rx_h_rates(struct ath10k *ar,
 				  struct ieee80211_rx_status *status,
 				  struct htt_rx_desc *rxd)
@@ -650,6 +652,7 @@
 	struct ieee80211_supported_band *sband;
 	u8 cck, rate, bw, sgi, mcs, nss;
 	u8 preamble = 0;
+	u8 group_id;
 	u32 info1, info2, info3;
 
 	info1 = __le32_to_cpu(rxd->ppdu_start.info1);
@@ -692,10 +695,50 @@
 	case HTT_RX_VHT_WITH_TXBF:
 		/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
 		   TODO check this */
-		mcs = (info3 >> 4) & 0x0F;
-		nss = ((info2 >> 10) & 0x07) + 1;
 		bw = info2 & 3;
 		sgi = info3 & 1;
+		group_id = (info2 >> 4) & 0x3F;
+
+		if (GROUP_ID_IS_SU_MIMO(group_id)) {
+			mcs = (info3 >> 4) & 0x0F;
+			nss = ((info2 >> 10) & 0x07) + 1;
+		} else {
+			/* Hardware doesn't decode VHT-SIG-B into the Rx
+			 * descriptor, so it's impossible to decode the MCS.
+			 * Also, since the firmware consumes Group ID
+			 * Management frames, the host has no knowledge of the
+			 * group/user position mapping, so it's impossible to
+			 * pick the correct Nsts from VHT-SIG-A1.
+			 *
+			 * Bandwidth and SGI are valid, so report the rate
+			 * info on a best-effort basis.
+			 */
+			mcs = 0;
+			nss = 1;
+		}
+
+		if (mcs > 0x09) {
+			ath10k_warn(ar, "invalid MCS received %u\n", mcs);
+			ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
+				    __le32_to_cpu(rxd->attention.flags),
+				    __le32_to_cpu(rxd->mpdu_start.info0),
+				    __le32_to_cpu(rxd->mpdu_start.info1),
+				    __le32_to_cpu(rxd->msdu_start.common.info0),
+				    __le32_to_cpu(rxd->msdu_start.common.info1),
+				    rxd->ppdu_start.info0,
+				    __le32_to_cpu(rxd->ppdu_start.info1),
+				    __le32_to_cpu(rxd->ppdu_start.info2),
+				    __le32_to_cpu(rxd->ppdu_start.info3),
+				    __le32_to_cpu(rxd->ppdu_start.info4));
+
+			ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
+				    __le32_to_cpu(rxd->msdu_end.common.info0),
+				    __le32_to_cpu(rxd->mpdu_end.info0));
+
+			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
+					"rx desc msdu payload: ",
+					rxd->msdu_payload, 50);
+		}
 
 		status->rate_idx = mcs;
 		status->vht_nss = nss;
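
The branch above keys off the VHT-SIG-A group ID: per the new GROUP_ID_IS_SU_MIMO() macro, values 0 and 63 mark single-user PPDUs, where MCS and Nsts are directly decodable, while anything else is multi-user and falls back to safe defaults. A self-contained sketch of the extraction, with the bit positions copied from this hunk (my_parse_vht_sig_a() is an illustrative name, not driver API):

#include <linux/types.h>

#define MY_GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)

static void my_parse_vht_sig_a(u32 info2, u32 info3,
			       u8 *bw, u8 *sgi, u8 *mcs, u8 *nss)
{
	u8 group_id;

	*bw = info2 & 3;			/* VHT-SIG-A1 bandwidth */
	*sgi = info3 & 1;			/* VHT-SIG-A2 short GI  */
	group_id = (info2 >> 4) & 0x3F;		/* VHT-SIG-A1 group ID  */

	if (MY_GROUP_ID_IS_SU_MIMO(group_id)) {
		*mcs = (info3 >> 4) & 0x0F;
		*nss = ((info2 >> 10) & 0x07) + 1;
	} else {
		/* MU PPDU: MCS/Nsts not recoverable, use defaults */
		*mcs = 0;
		*nss = 1;
	}
}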
@@ -2082,6 +2125,7 @@
 	/* Free the indication buffer */
 	dev_kfree_skb_any(skb);
 }
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
 
 static void ath10k_htt_txrx_compl_task(unsigned long ptr)
 {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 43aa5e2..1682397 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,22 +22,28 @@
 #include "txrx.h"
 #include "debug.h"
 
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
 {
+	if (limit_mgmt_desc)
+		htt->num_pending_mgmt_tx--;
+
 	htt->num_pending_tx--;
 	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
 		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+				      bool limit_mgmt_desc)
 {
 	spin_lock_bh(&htt->tx_lock);
-	__ath10k_htt_tx_dec_pending(htt);
+	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 	spin_unlock_bh(&htt->tx_lock);
 }
 
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+				     bool limit_mgmt_desc, bool is_probe_resp)
 {
+	struct ath10k *ar = htt->ar;
 	int ret = 0;
 
 	spin_lock_bh(&htt->tx_lock);
@@ -47,6 +53,15 @@
 		goto exit;
 	}
 
+	if (limit_mgmt_desc) {
+		if (is_probe_resp && (htt->num_pending_mgmt_tx >
+		    ar->hw_params.max_probe_resp_desc_thres)) {
+			ret = -EBUSY;
+			goto exit;
+		}
+		htt->num_pending_mgmt_tx++;
+	}
+
 	htt->num_pending_tx++;
 	if (htt->num_pending_tx == htt->max_num_pending_tx)
 		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
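
The accounting added here layers a second counter on top of the existing num_pending_tx ceiling: when the hardware advertises max_probe_resp_desc_thres, probe responses are additionally rejected once the management counter crosses that threshold. A lock-free reduction of the logic (locking stays with the caller, as in the driver; struct my_htt and its field names are illustrative):

#include <linux/errno.h>
#include <linux/types.h>

struct my_htt {
	int num_pending_tx;
	int max_num_pending_tx;
	int num_pending_mgmt_tx;
	int max_probe_resp_thres;
};

static int my_tx_inc_pending(struct my_htt *htt,
			     bool limit_mgmt_desc, bool is_probe_resp)
{
	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	if (limit_mgmt_desc) {
		if (is_probe_resp &&
		    htt->num_pending_mgmt_tx > htt->max_probe_resp_thres)
			return -EBUSY;
		htt->num_pending_mgmt_tx++;
	}

	htt->num_pending_tx++;
	return 0;
}

static void my_tx_dec_pending(struct my_htt *htt, bool limit_mgmt_desc)
{
	if (limit_mgmt_desc)
		htt->num_pending_mgmt_tx--;

	htt->num_pending_tx--;
}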
@@ -93,9 +108,12 @@
 	spin_lock_init(&htt->tx_lock);
 	idr_init(&htt->pending_tx);
 
-	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
-				       sizeof(struct ath10k_htt_txbuf), 4, 0);
-	if (!htt->tx_pool) {
+	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
+	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->txbuf.paddr,
+						  GFP_DMA);
+	if (!htt->txbuf.vaddr) {
+		ath10k_err(ar, "failed to alloc tx buffer\n");
 		ret = -ENOMEM;
 		goto free_idr_pending_tx;
 	}
@@ -110,14 +128,17 @@
 	if (!htt->frag_desc.vaddr) {
 		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
 		ret = -ENOMEM;
-		goto free_tx_pool;
+		goto free_txbuf;
 	}
 
 skip_frag_desc_alloc:
 	return 0;
 
-free_tx_pool:
-	dma_pool_destroy(htt->tx_pool);
+free_txbuf:
+	size = htt->max_num_pending_tx *
+			  sizeof(struct ath10k_htt_txbuf);
+	dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+			  htt->txbuf.paddr);
 free_idr_pending_tx:
 	idr_destroy(&htt->pending_tx);
 	return ret;
@@ -145,7 +166,13 @@
 
 	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
 	idr_destroy(&htt->pending_tx);
-	dma_pool_destroy(htt->tx_pool);
+
+	if (htt->txbuf.vaddr) {
+		size = htt->max_num_pending_tx *
+				  sizeof(struct ath10k_htt_txbuf);
+		dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
+				  htt->txbuf.paddr);
+	}
 
 	if (htt->frag_desc.vaddr) {
 		size = htt->max_num_pending_tx *
@@ -160,6 +187,12 @@
 	dev_kfree_skb_any(skb);
 }
 
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
@@ -417,8 +450,19 @@
 	int len = 0;
 	int msdu_id = -1;
 	int res;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+	bool limit_mgmt_desc = false;
+	bool is_probe_resp = false;
 
-	res = ath10k_htt_tx_inc_pending(htt);
+	if (ar->hw_params.max_probe_resp_desc_thres) {
+		limit_mgmt_desc = true;
+
+		if (ieee80211_is_probe_resp(hdr->frame_control))
+			is_probe_resp = true;
+	}
+
+	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
+
 	if (res)
 		goto err;
 
@@ -428,9 +472,9 @@
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
-	if (res < 0) {
+	if (res < 0)
 		goto err_tx_dec;
-	}
+
 	msdu_id = res;
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -476,7 +520,7 @@
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt);
+	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
@@ -495,32 +539,37 @@
 	int res;
 	u8 flags0 = 0;
 	u16 msdu_id, flags1 = 0;
-	dma_addr_t paddr = 0;
 	u32 frags_paddr = 0;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
+	bool limit_mgmt_desc = false;
+	bool is_probe_resp = false;
 
-	res = ath10k_htt_tx_inc_pending(htt);
+	if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
+	    ar->hw_params.max_probe_resp_desc_thres) {
+		limit_mgmt_desc = true;
+
+		if (ieee80211_is_probe_resp(hdr->frame_control))
+			is_probe_resp = true;
+	}
+
+	res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
 	if (res)
 		goto err;
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	spin_unlock_bh(&htt->tx_lock);
-	if (res < 0) {
+	if (res < 0)
 		goto err_tx_dec;
-	}
+
 	msdu_id = res;
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
 
-	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
-					   &paddr);
-	if (!skb_cb->htt.txbuf) {
-		res = -ENOMEM;
-		goto err_free_msdu_id;
-	}
-	skb_cb->htt.txbuf_paddr = paddr;
+	skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
+	skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
+		(sizeof(struct ath10k_htt_txbuf) * msdu_id);
 
 	if ((ieee80211_is_action(hdr->frame_control) ||
 	     ieee80211_is_deauth(hdr->frame_control) ||
@@ -528,7 +577,8 @@
 	     ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
 	} else if (!skb_cb->htt.nohwcrypt &&
-		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+		   skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
+		   ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
 	}
 
@@ -537,7 +587,7 @@
 	res = dma_mapping_error(dev, skb_cb->paddr);
 	if (res) {
 		res = -EIO;
-		goto err_free_txbuf;
+		goto err_free_msdu_id;
 	}
 
 	switch (skb_cb->txmode) {
@@ -669,16 +719,12 @@
 
 err_unmap_msdu:
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-err_free_txbuf:
-	dma_pool_free(htt->tx_pool,
-		      skb_cb->htt.txbuf,
-		      skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
 	spin_unlock_bh(&htt->tx_lock);
 err_tx_dec:
-	ath10k_htt_tx_dec_pending(htt);
+	ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 err:
 	return res;
 }
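
The txbuf rework in this file swaps a per-packet dma_pool_alloc() for a single dma_alloc_coherent() region sized to max_num_pending_tx slots; since every in-flight frame already owns a unique msdu_id, its slot address on both the CPU and device side becomes plain arithmetic. A sketch under those assumptions (my_* names and the 64-byte slot are placeholders for struct ath10k_htt_txbuf):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_txbuf {
	u8 data[64];	/* stand-in for struct ath10k_htt_txbuf */
};

struct my_htt {
	struct device *dev;
	int max_num_pending_tx;
	struct my_txbuf *vaddr;
	dma_addr_t paddr;
};

static int my_txbuf_alloc(struct my_htt *htt)
{
	size_t size = htt->max_num_pending_tx * sizeof(struct my_txbuf);

	/* One coherent block for all slots, as in the patch (GFP_DMA) */
	htt->vaddr = dma_alloc_coherent(htt->dev, size, &htt->paddr,
					GFP_DMA);
	if (!htt->vaddr)
		return -ENOMEM;

	return 0;
}

/* Per-packet lookup becomes indexing by msdu_id, no allocator call. */
static void my_txbuf_slot(struct my_htt *htt, u16 msdu_id,
			  struct my_txbuf **vaddr, dma_addr_t *paddr)
{
	*vaddr = &htt->vaddr[msdu_id];
	*paddr = htt->paddr + msdu_id * sizeof(struct my_txbuf);
}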
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 23afcda..39966a0 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -84,6 +84,15 @@
 #define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA9377 1.0 definitions */
+#define QCA9377_HW_1_0_DEV_VERSION     0x05020001
+#define QCA9377_HW_1_0_CHIP_ID_REV     0x1
+#define QCA9377_HW_1_0_FW_DIR          ATH10K_FW_DIR "/QCA9377/hw1.0"
+#define QCA9377_HW_1_0_FW_FILE         "firmware.bin"
+#define QCA9377_HW_1_0_OTP_FILE        "otp.bin"
+#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9377_HW_1_0_PATCH_LOAD_ADDR	0x1234
+
 #define ATH10K_FW_API2_FILE		"firmware-2.bin"
 #define ATH10K_FW_API3_FILE		"firmware-3.bin"
 
@@ -94,9 +103,13 @@
 #define ATH10K_FW_API5_FILE		"firmware-5.bin"
 
 #define ATH10K_FW_UTF_FILE		"utf.bin"
+#define ATH10K_FW_UTF_API2_FILE		"utf-2.bin"
 
 /* includes also the null byte */
 #define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC                  "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_API2_FILE         "board-2.bin"
 
 #define REG_DUMP_COUNT_QCA988X 60
 
@@ -159,10 +172,21 @@
 	ATH10K_FW_HTT_OP_VERSION_MAX,
 };
 
+enum ath10k_bd_ie_type {
+	/* contains sub IEs of enum ath10k_bd_ie_board_type */
+	ATH10K_BD_IE_BOARD = 0,
+};
+
+enum ath10k_bd_ie_board_type {
+	ATH10K_BD_IE_BOARD_NAME = 0,
+	ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
 enum ath10k_hw_rev {
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
 	ATH10K_HW_QCA99X0,
+	ATH10K_HW_QCA9377,
 };
 
 struct ath10k_hw_regs {
@@ -215,6 +239,7 @@
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
 #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
 
 /* Known pecularities:
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
@@ -337,7 +362,7 @@
 #define TARGET_10X_MAX_FRAG_ENTRIES		0
 
 /* 10.2 parameters */
-#define TARGET_10_2_DMA_BURST_SIZE		1
+#define TARGET_10_2_DMA_BURST_SIZE		0
 
 /* Target specific defines for WMI-TLV firmware */
 #define TARGET_TLV_NUM_VDEVS			4
@@ -391,7 +416,7 @@
 
 #define TARGET_10_4_TX_DBG_LOG_SIZE		1024
 #define TARGET_10_4_NUM_WDS_ENTRIES		32
-#define TARGET_10_4_DMA_BURST_SIZE		1
+#define TARGET_10_4_DMA_BURST_SIZE		0
 #define TARGET_10_4_MAC_AGGR_DELIM		0
 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
 #define TARGET_10_4_VOW_CONFIG			0
@@ -414,16 +439,6 @@
 #define CE_COUNT ar->hw_values->ce_count
 
 /*
- * Total number of PCIe MSI interrupts requested for all interrupt sources.
- * PCIe standard forces this to be a power of 2.
- * Some Host OS's limit MSI requests that can be granted to 8
- * so for now we abide by this limit and avoid requesting more
- * than that.
- */
-#define MSI_NUM_REQUEST_LOG2	3
-#define MSI_NUM_REQUEST		(1<<MSI_NUM_REQUEST_LOG2)
-
-/*
  * Granted MSIs are assigned as follows:
  * Firmware uses the first
  * Remaining MSIs, if any, are used by Copy Engines
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 64674c9..a7411fe 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -197,9 +197,8 @@
 		return -EOPNOTSUPP;
 	}
 
-	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-	}
 
 	if (cmd == DISABLE_KEY) {
 		arg.key_cipher = WMI_CIPHER_NONE;
@@ -1070,6 +1069,7 @@
 		return false;
 
 	return ar->monitor ||
+	       ar->filter_flags & FIF_OTHER_BSS ||
 	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
 }
 
@@ -1110,7 +1110,8 @@
 
 			ret = ath10k_monitor_stop(ar);
 			if (ret)
-				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+					    ret);
 				/* not serious */
 		}
 
@@ -2083,7 +2084,8 @@
 	enum ieee80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
-	int i, n, max_nss;
+	int i, n;
+	u8 max_nss;
 	u32 stbc;
 
 	lockdep_assert_held(&ar->conf_mutex);
@@ -2168,7 +2170,7 @@
 			arg->peer_ht_rates.rates[i] = i;
 	} else {
 		arg->peer_ht_rates.num_rates = n;
-		arg->peer_num_spatial_streams = max_nss;
+		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
 	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -3617,9 +3619,6 @@
 	}
 	spin_unlock_bh(&ar->data_lock);
 
-	/* Add a 200ms margin to account for event/command processing */
-	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
-				     msecs_to_jiffies(arg->max_scan_time+200));
 	return 0;
 }
 
@@ -3737,13 +3736,8 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (ar->cfg_tx_chainmask) {
-		*tx_ant = ar->cfg_tx_chainmask;
-		*rx_ant = ar->cfg_rx_chainmask;
-	} else {
-		*tx_ant = ar->supp_tx_chainmask;
-		*rx_ant = ar->supp_rx_chainmask;
-	}
+	*tx_ant = ar->cfg_tx_chainmask;
+	*rx_ant = ar->cfg_rx_chainmask;
 
 	mutex_unlock(&ar->conf_mutex);
 
@@ -3763,6 +3757,169 @@
 		    dbg, cm);
 }
 
+static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
+{
+	int nsts = ar->vht_cap_info;
+
+	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+
+	/* If the firmware does not deliver the number of supported
+	 * space-time streams to the host, assume it supports up to 4 BF
+	 * STS and return the value for the VHT CAP (nsts - 1).
+	 */
+	if (nsts == 0)
+		return 3;
+
+	return nsts;
+}
+
+static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
+{
+	int sound_dim = ar->vht_cap_info;
+
+	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+	/* If the sounding dimension is not advertised by the firmware,
+	 * use a default value of 1.
+	 */
+	if (sound_dim == 0)
+		return 1;
+
+	return sound_dim;
+}
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+	struct ieee80211_sta_vht_cap vht_cap = {0};
+	u16 mcs_map;
+	u32 val;
+	int i;
+
+	vht_cap.vht_supported = 1;
+	vht_cap.cap = ar->vht_cap_info;
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+		val = ath10k_mac_get_vht_cap_bf_sts(ar);
+		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+		vht_cap.cap |= val;
+	}
+
+	mcs_map = 0;
+	for (i = 0; i < 8; i++) {
+		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
+			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+		else
+			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+	}
+
+	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+	return vht_cap;
+}
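
These helpers and ath10k_create_vht_cap() apply the same mask-then-shift discipline: a field is read by masking and shifting down, and written back by shifting up and re-masking so an oversized value cannot bleed into adjacent bits. A generic sketch with made-up field macros:

#include <linux/types.h>

#define MY_FIELD_MASK	0x0000e000
#define MY_FIELD_SHIFT	13

static inline u32 my_field_get(u32 reg)
{
	return (reg & MY_FIELD_MASK) >> MY_FIELD_SHIFT;
}

static inline u32 my_field_set(u32 reg, u32 val)
{
	return (reg & ~MY_FIELD_MASK) |
	       ((val << MY_FIELD_SHIFT) & MY_FIELD_MASK);
}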
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+	int i;
+	struct ieee80211_sta_ht_cap ht_cap = {0};
+
+	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+		return ht_cap;
+
+	ht_cap.ht_supported = 1;
+	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+	ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+		u32 smps;
+
+		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
+		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+		ht_cap.cap |= smps;
+	}
+
+	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+		u32 stbc;
+
+		stbc   = ar->ht_cap_info;
+		stbc  &= WMI_HT_CAP_RX_STBC;
+		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+		stbc  &= IEEE80211_HT_CAP_RX_STBC;
+
+		ht_cap.cap |= stbc;
+	}
+
+	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+	/* max AMSDU is implicitly taken from vht_cap_info */
+	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	for (i = 0; i < ar->num_rf_chains; i++) {
+		if (ar->cfg_rx_chainmask & BIT(i))
+			ht_cap.mcs.rx_mask[i] = 0xFF;
+	}
+
+	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+	return ht_cap;
+}
+
+static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
+{
+	struct ieee80211_supported_band *band;
+	struct ieee80211_sta_vht_cap vht_cap;
+	struct ieee80211_sta_ht_cap ht_cap;
+
+	ht_cap = ath10k_get_ht_cap(ar);
+	vht_cap = ath10k_create_vht_cap(ar);
+
+	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+		band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+		band->ht_cap = ht_cap;
+
+		/* Enable the VHT support at 2.4 GHz */
+		band->vht_cap = vht_cap;
+	}
+	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+		band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+		band->ht_cap = ht_cap;
+		band->vht_cap = vht_cap;
+	}
+}
+
 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
 {
 	int ret;
@@ -3795,6 +3952,9 @@
 		return ret;
 	}
 
+	/* Reload HT/VHT capability */
+	ath10k_mac_setup_ht_vht_cap(ar);
+
 	return 0;
 }
 
@@ -3885,9 +4045,7 @@
 		}
 	}
 
-	if (ar->cfg_tx_chainmask)
-		__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
-				     ar->cfg_rx_chainmask);
+	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
 
 	/*
 	 * By default FW set ARP frames ac to voice (6). In that case ARP
@@ -3906,6 +4064,18 @@
 		goto err_core_stop;
 	}
 
+	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
+		     ar->fw_features)) {
+		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
+							  WMI_CCA_DETECT_LEVEL_AUTO,
+							  WMI_CCA_DETECT_MARGIN_AUTO);
+		if (ret) {
+			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
+				    ret);
+			goto err_core_stop;
+		}
+	}
+
 	ret = ath10k_wmi_pdev_set_param(ar,
 					ar->wmi.pdev_param->ani_enable, 1);
 	if (ret) {
@@ -4068,17 +4238,21 @@
 {
 	u32 value = 0;
 	struct ath10k *ar = arvif->ar;
+	int nsts;
+	int sound_dim;
 
 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
 		return 0;
 
+	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
-		value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
 
+	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
-		value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
 
 	if (!value)
 		return 0;
@@ -4175,6 +4349,14 @@
 	case NL80211_IFTYPE_ADHOC:
 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
 		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			ret = -EINVAL;
+			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
+			goto err;
+		}
+		arvif->vdev_type = WMI_VDEV_TYPE_AP;
+		break;
 	case NL80211_IFTYPE_AP:
 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
 
@@ -4215,6 +4397,7 @@
 	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
 	 */
 	if (vif->type == NL80211_IFTYPE_ADHOC ||
+	    vif->type == NL80211_IFTYPE_MESH_POINT ||
 	    vif->type == NL80211_IFTYPE_AP) {
 		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
 							IEEE80211_MAX_FRAME_LEN,
@@ -4554,6 +4737,13 @@
 		if (ret)
 			ath10k_warn(ar, "failed to update beacon template: %d\n",
 				    ret);
+
+		if (ieee80211_vif_is_mesh(vif)) {
+			/* mesh doesn't use SSID but firmware needs it */
+			strncpy(arvif->u.ap.ssid, "mesh",
+				sizeof(arvif->u.ap.ssid));
+			arvif->u.ap.ssid_len = 4;
+		}
 	}
 
 	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
@@ -4607,7 +4797,7 @@
 						info->use_cts_prot ? 1 : 0);
 		if (ret)
 			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
-					info->use_cts_prot, arvif->vdev_id, ret);
+				    info->use_cts_prot, arvif->vdev_id, ret);
 	}
 
 	if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -4751,6 +4941,11 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	/* Add a 200ms margin to account for event/command processing */
+	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+				     msecs_to_jiffies(arg.max_scan_time +
+						      200));
+
 exit:
 	mutex_unlock(&ar->conf_mutex);
 	return ret;
@@ -5293,6 +5488,7 @@
 	} else if (old_state == IEEE80211_STA_AUTH &&
 		   new_state == IEEE80211_STA_ASSOC &&
 		   (vif->type == NL80211_IFTYPE_AP ||
+		    vif->type == NL80211_IFTYPE_MESH_POINT ||
 		    vif->type == NL80211_IFTYPE_ADHOC)) {
 		/*
 		 * New association.
@@ -5328,6 +5524,7 @@
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		    new_state == IEEE80211_STA_AUTH &&
 		    (vif->type == NL80211_IFTYPE_AP ||
+		     vif->type == NL80211_IFTYPE_MESH_POINT ||
 		     vif->type == NL80211_IFTYPE_ADHOC)) {
 		/*
 		 * Disassociation.
@@ -5901,7 +6098,7 @@
 }
 
 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
-					    u8 rate, u8 nss, u8 sgi)
+					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
 {
 	struct ath10k *ar = arvif->ar;
 	u32 vdev_param;
@@ -5934,6 +6131,13 @@
 		return ret;
 	}
 
+	vdev_param = ar->wmi.vdev_param->ldpc;
+	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
+	if (ret) {
+		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -5997,6 +6201,7 @@
 	u8 rate;
 	u8 nss;
 	u8 sgi;
+	u8 ldpc;
 	int single_nss;
 	int ret;
 
@@ -6006,6 +6211,7 @@
 	band = def.chan->band;
 	ht_mcs_mask = mask->control[band].ht_mcs;
 	vht_mcs_mask = mask->control[band].vht_mcs;
+	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
 
 	sgi = mask->control[band].gi;
 	if (sgi == NL80211_TXRATE_FORCE_LGI)
@@ -6044,7 +6250,7 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
+	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
 	if (ret) {
 		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
 			    arvif->vdev_id, ret);
@@ -6144,7 +6350,7 @@
 			       struct ieee80211_vif *vif,
 			       enum ieee80211_ampdu_mlme_action action,
 			       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			       u8 buf_size)
+			       u8 buf_size, bool amsdu)
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
@@ -6203,8 +6409,8 @@
 	rcu_read_lock();
 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
 		ieee80211_iter_chan_contexts_atomic(ar->hw,
-					ath10k_mac_get_any_chandef_iter,
-					&def);
+						    ath10k_mac_get_any_chandef_iter,
+						    &def);
 
 		if (vifs)
 			def = &vifs[0].new_ctx->def;
@@ -6218,6 +6424,94 @@
 	rcu_read_unlock();
 }
 
+static void
+ath10k_mac_update_vif_chan(struct ath10k *ar,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs)
+{
+	struct ath10k_vif *arvif;
+	int ret;
+	int i;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	/* First stop monitor interface. Some FW versions crash if there's a
+	 * lone monitor interface.
+	 */
+	if (ar->monitor_started)
+		ath10k_monitor_stop(ar);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+			   arvif->vdev_id,
+			   vifs[i].old_ctx->def.chan->center_freq,
+			   vifs[i].new_ctx->def.chan->center_freq,
+			   vifs[i].old_ctx->def.width,
+			   vifs[i].new_ctx->def.width);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+		if (ret) {
+			ath10k_warn(ar, "failed to down vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	/* All relevant vdevs are downed and associated channel resources
+	 * should be available for the channel switch now.
+	 */
+
+	spin_lock_bh(&ar->data_lock);
+	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+	spin_unlock_bh(&ar->data_lock);
+
+	for (i = 0; i < n_vifs; i++) {
+		arvif = ath10k_vif_to_arvif(vifs[i].vif);
+
+		if (WARN_ON(!arvif->is_started))
+			continue;
+
+		if (WARN_ON(!arvif->is_up))
+			continue;
+
+		ret = ath10k_mac_setup_bcn_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_mac_setup_prb_tmpl(arvif);
+		if (ret)
+			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+				    ret);
+
+		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+		if (ret) {
+			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+
+		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+					 arvif->bssid);
+		if (ret) {
+			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+				    arvif->vdev_id, ret);
+			continue;
+		}
+	}
+
+	ath10k_monitor_recalc(ar);
+}
+
 static int
 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
 			  struct ieee80211_chanctx_conf *ctx)
@@ -6264,12 +6558,52 @@
 	mutex_unlock(&ar->conf_mutex);
 }
 
+struct ath10k_mac_change_chanctx_arg {
+	struct ieee80211_chanctx_conf *ctx;
+	struct ieee80211_vif_chanctx_switch *vifs;
+	int n_vifs;
+	int next_vif;
+};
+
+static void
+ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_change_chanctx_arg *arg = data;
+
+	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+		return;
+
+	arg->n_vifs++;
+}
+
+static void
+ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+				    struct ieee80211_vif *vif)
+{
+	struct ath10k_mac_change_chanctx_arg *arg = data;
+	struct ieee80211_chanctx_conf *ctx;
+
+	ctx = rcu_access_pointer(vif->chanctx_conf);
+	if (ctx != arg->ctx)
+		return;
+
+	if (WARN_ON(arg->next_vif == arg->n_vifs))
+		return;
+
+	arg->vifs[arg->next_vif].vif = vif;
+	arg->vifs[arg->next_vif].old_ctx = ctx;
+	arg->vifs[arg->next_vif].new_ctx = ctx;
+	arg->next_vif++;
+}
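
These two iterators implement the usual count-then-fill idiom for atomic interface iteration: the first pass only counts matches, the array is allocated outside the atomic context, and the second pass fills it while guarding against the set having grown in between (hence the WARN_ON on next_vif). Reduced to its shape, with my_* names as illustrative stand-ins:

#include <linux/bug.h>
#include <linux/types.h>

struct my_iter_arg {
	void *key;
	int n;		/* counted in pass 1 */
	int next;	/* fill cursor for pass 2 */
	void **slots;	/* allocated between the two passes */
};

static bool my_match(void *item, void *key)
{
	return item == key;	/* stand-in for the chanctx compare */
}

static void my_cnt_iter(void *data, void *item)
{
	struct my_iter_arg *arg = data;

	if (my_match(item, arg->key))
		arg->n++;
}

static void my_fill_iter(void *data, void *item)
{
	struct my_iter_arg *arg = data;

	if (!my_match(item, arg->key))
		return;

	if (WARN_ON(arg->next == arg->n))
		return;		/* set changed between the passes */

	arg->slots[arg->next++] = item;
}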
+
 static void
 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
 			     struct ieee80211_chanctx_conf *ctx,
 			     u32 changed)
 {
 	struct ath10k *ar = hw->priv;
+	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
 
 	mutex_lock(&ar->conf_mutex);
 
@@ -6283,6 +6617,30 @@
 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
 		goto unlock;
 
+	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+		ieee80211_iterate_active_interfaces_atomic(
+					hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					ath10k_mac_change_chanctx_cnt_iter,
+					&arg);
+		if (arg.n_vifs == 0)
+			goto radar;
+
+		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
+				   GFP_KERNEL);
+		if (!arg.vifs)
+			goto radar;
+
+		ieee80211_iterate_active_interfaces_atomic(
+					hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					ath10k_mac_change_chanctx_fill_iter,
+					&arg);
+		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+		kfree(arg.vifs);
+	}
+
+radar:
 	ath10k_recalc_radar_detection(ar);
 
 	/* FIXME: How to configure Rx chains properly? */
@@ -6402,91 +6760,13 @@
 				 enum ieee80211_chanctx_switch_mode mode)
 {
 	struct ath10k *ar = hw->priv;
-	struct ath10k_vif *arvif;
-	int ret;
-	int i;
 
 	mutex_lock(&ar->conf_mutex);
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC,
 		   "mac chanctx switch n_vifs %d mode %d\n",
 		   n_vifs, mode);
-
-	/* First stop monitor interface. Some FW versions crash if there's a
-	 * lone monitor interface.
-	 */
-	if (ar->monitor_started)
-		ath10k_monitor_stop(ar);
-
-	for (i = 0; i < n_vifs; i++) {
-		arvif = ath10k_vif_to_arvif(vifs[i].vif);
-
-		ath10k_dbg(ar, ATH10K_DBG_MAC,
-			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
-			   arvif->vdev_id,
-			   vifs[i].old_ctx->def.chan->center_freq,
-			   vifs[i].new_ctx->def.chan->center_freq,
-			   vifs[i].old_ctx->def.width,
-			   vifs[i].new_ctx->def.width);
-
-		if (WARN_ON(!arvif->is_started))
-			continue;
-
-		if (WARN_ON(!arvif->is_up))
-			continue;
-
-		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
-		if (ret) {
-			ath10k_warn(ar, "failed to down vdev %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-	}
-
-	/* All relevant vdevs are downed and associated channel resources
-	 * should be available for the channel switch now.
-	 */
-
-	spin_lock_bh(&ar->data_lock);
-	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
-	spin_unlock_bh(&ar->data_lock);
-
-	for (i = 0; i < n_vifs; i++) {
-		arvif = ath10k_vif_to_arvif(vifs[i].vif);
-
-		if (WARN_ON(!arvif->is_started))
-			continue;
-
-		if (WARN_ON(!arvif->is_up))
-			continue;
-
-		ret = ath10k_mac_setup_bcn_tmpl(arvif);
-		if (ret)
-			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
-				    ret);
-
-		ret = ath10k_mac_setup_prb_tmpl(arvif);
-		if (ret)
-			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
-				    ret);
-
-		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
-		if (ret) {
-			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-
-		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
-					 arvif->bssid);
-		if (ret) {
-			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
-				    arvif->vdev_id, ret);
-			continue;
-		}
-	}
-
-	ath10k_monitor_recalc(ar);
+	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
 
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
@@ -6642,6 +6922,9 @@
 	{
 	.max	= 7,
 	.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+		| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
 	},
 };
 
@@ -6649,6 +6932,9 @@
 	{
 	.max	= 8,
 	.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+		| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
 	},
 };
 
@@ -6686,6 +6972,9 @@
 	{
 		.max = 2,
 		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			 BIT(NL80211_IFTYPE_P2P_GO),
 	},
@@ -6707,6 +6996,9 @@
 	{
 		.max = 1,
 		.types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+			 BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
 			 BIT(NL80211_IFTYPE_P2P_GO),
 	},
 	{
@@ -6773,6 +7065,9 @@
 	{
 		.max	= 16,
 		.types	= BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+			| BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
 	},
 };
 
@@ -6792,111 +7087,6 @@
 	},
 };
 
-static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
-{
-	struct ieee80211_sta_vht_cap vht_cap = {0};
-	u16 mcs_map;
-	u32 val;
-	int i;
-
-	vht_cap.vht_supported = 1;
-	vht_cap.cap = ar->vht_cap_info;
-
-	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
-				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
-		val = ar->num_rf_chains - 1;
-		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
-		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
-
-		vht_cap.cap |= val;
-	}
-
-	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
-				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
-		val = ar->num_rf_chains - 1;
-		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
-		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
-
-		vht_cap.cap |= val;
-	}
-
-	mcs_map = 0;
-	for (i = 0; i < 8; i++) {
-		if (i < ar->num_rf_chains)
-			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
-		else
-			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
-	}
-
-	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
-	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
-
-	return vht_cap;
-}
-
-static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
-{
-	int i;
-	struct ieee80211_sta_ht_cap ht_cap = {0};
-
-	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
-		return ht_cap;
-
-	ht_cap.ht_supported = 1;
-	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
-	ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
-
-	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
-		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
-
-	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
-		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
-
-	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
-		u32 smps;
-
-		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
-		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
-		ht_cap.cap |= smps;
-	}
-
-	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
-		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
-
-	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
-		u32 stbc;
-
-		stbc   = ar->ht_cap_info;
-		stbc  &= WMI_HT_CAP_RX_STBC;
-		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
-		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
-		stbc  &= IEEE80211_HT_CAP_RX_STBC;
-
-		ht_cap.cap |= stbc;
-	}
-
-	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
-		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
-	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
-		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
-
-	/* max AMSDU is implicitly taken from vht_cap_info */
-	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
-		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
-	for (i = 0; i < ar->num_rf_chains; i++)
-		ht_cap.mcs.rx_mask[i] = 0xFF;
-
-	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-
-	return ht_cap;
-}
-
 static void ath10k_get_arvif_iter(void *data, u8 *mac,
 				  struct ieee80211_vif *vif)
 {
@@ -6938,8 +7128,6 @@
 		WLAN_CIPHER_SUITE_AES_CMAC,
 	};
 	struct ieee80211_supported_band *band;
-	struct ieee80211_sta_vht_cap vht_cap;
-	struct ieee80211_sta_ht_cap ht_cap;
 	void *channels;
 	int ret;
 
@@ -6947,9 +7135,6 @@
 
 	SET_IEEE80211_DEV(ar->hw, ar->dev);
 
-	ht_cap = ath10k_get_ht_cap(ar);
-	vht_cap = ath10k_create_vht_cap(ar);
-
 	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
 		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
 		     ATH10K_NUM_CHANS);
@@ -6968,10 +7153,6 @@
 		band->channels = channels;
 		band->n_bitrates = ath10k_g_rates_size;
 		band->bitrates = ath10k_g_rates;
-		band->ht_cap = ht_cap;
-
-		/* Enable the VHT support at 2.4 GHz */
-		band->vht_cap = vht_cap;
 
 		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
 	}
@@ -6990,17 +7171,18 @@
 		band->channels = channels;
 		band->n_bitrates = ath10k_a_rates_size;
 		band->bitrates = ath10k_a_rates;
-		band->ht_cap = ht_cap;
-		band->vht_cap = vht_cap;
 		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
 	}
 
+	ath10k_mac_setup_ht_vht_cap(ar);
+
 	ar->hw->wiphy->interface_modes =
 		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_AP);
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_MESH_POINT);
 
-	ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
-	ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
+	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
+	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
 
 	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
 		ar->hw->wiphy->interface_modes |=
@@ -7146,7 +7328,7 @@
 			    ath10k_reg_notifier);
 	if (ret) {
 		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
-		goto err_free;
+		goto err_dfs_detector_exit;
 	}
 
 	ar->hw->wiphy->cipher_suites = cipher_suites;
@@ -7155,7 +7337,7 @@
 	ret = ieee80211_register_hw(ar->hw);
 	if (ret) {
 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
-		goto err_free;
+		goto err_dfs_detector_exit;
 	}
 
 	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
@@ -7169,10 +7351,16 @@
 
 err_unregister:
 	ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+		ar->dfs_detector->exit(ar->dfs_detector);
+
 err_free:
 	kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
 	kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
 
+	SET_IEEE80211_DEV(ar->hw, NULL);
 	return ret;
 }
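
Note on the mac.c hunks above: mesh point operation is added to the
interface combinations behind CONFIG_MAC80211_MESH, the HT/VHT
capability setup moves into ath10k_mac_setup_ht_vht_cap(), and the
registration error path gains err_dfs_detector_exit so the DFS detector
is torn down and the hw backpointer cleared on failure. The unwinding
follows the usual kernel goto idiom: each failure jumps to a label that
releases only what was acquired so far, in reverse order. A minimal
standalone sketch of that idiom, with illustrative names not taken from
the driver:

	#include <stdio.h>
	#include <stdlib.h>

	struct ctx {
		void *a;
		void *b;
	};

	static int ctx_setup(struct ctx *c)
	{
		c->a = malloc(16);
		if (!c->a)
			goto err;		/* nothing to unwind yet */

		c->b = malloc(16);
		if (!c->b)
			goto err_free_a;	/* undo only what succeeded */

		return 0;

	err_free_a:
		free(c->a);
	err:
		return -1;
	}

	int main(void)
	{
		struct ctx c = { 0 };

		if (ctx_setup(&c) == 0) {
			printf("setup ok\n");
			free(c.b);
			free(c.a);
		}
		return 0;
	}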
 
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 1046ab6..3fca200 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -61,12 +61,14 @@
 #define QCA6164_2_1_DEVICE_ID	(0x0041)
 #define QCA6174_2_1_DEVICE_ID	(0x003e)
 #define QCA99X0_2_0_DEVICE_ID	(0x0040)
+#define QCA9377_1_0_DEVICE_ID	(0x0042)
 
 static const struct pci_device_id ath10k_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
 	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
 	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
 	{0}
 };
 
@@ -90,6 +92,7 @@
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
 
 	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
 };
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -104,6 +107,10 @@
 			       struct ath10k_ce_pipe *rx_pipe,
 			       struct bmi_xfer *xfer);
 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 
 static const struct ce_attr host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -112,6 +119,7 @@
 		.src_nentries = 16,
 		.src_sz_max = 256,
 		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htc_tx_cb,
 	},
 
 	/* CE1: target->host HTT + HTC control */
@@ -120,6 +128,7 @@
 		.src_nentries = 0,
 		.src_sz_max = 2048,
 		.dest_nentries = 512,
+		.recv_cb = ath10k_pci_htc_rx_cb,
 	},
 
 	/* CE2: target->host WMI */
@@ -128,6 +137,7 @@
 		.src_nentries = 0,
 		.src_sz_max = 2048,
 		.dest_nentries = 128,
+		.recv_cb = ath10k_pci_htc_rx_cb,
 	},
 
 	/* CE3: host->target WMI */
@@ -136,6 +146,7 @@
 		.src_nentries = 32,
 		.src_sz_max = 2048,
 		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htc_tx_cb,
 	},
 
 	/* CE4: host->target HTT */
@@ -144,14 +155,16 @@
 		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
 		.src_sz_max = 256,
 		.dest_nentries = 0,
+		.send_cb = ath10k_pci_htt_tx_cb,
 	},
 
-	/* CE5: unused */
+	/* CE5: target->host HTT (HIF->HTT) */
 	{
 		.flags = CE_ATTR_FLAGS,
 		.src_nentries = 0,
-		.src_sz_max = 0,
-		.dest_nentries = 0,
+		.src_sz_max = 512,
+		.dest_nentries = 512,
+		.recv_cb = ath10k_pci_htt_rx_cb,
 	},
 
 	/* CE6: target autonomous hif_memcpy */
@@ -257,12 +270,12 @@
 
 	/* NB: 50% of src nentries, since tx has 2 frags */
 
-	/* CE5: unused */
+	/* CE5: target->host HTT (HIF->HTT) */
 	{
 		.pipenum = __cpu_to_le32(5),
-		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
 		.nentries = __cpu_to_le32(32),
-		.nbytes_max = __cpu_to_le32(2048),
+		.nbytes_max = __cpu_to_le32(512),
 		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
 		.reserved = __cpu_to_le32(0),
 	},
@@ -396,7 +409,7 @@
 	{
 		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
-		__cpu_to_le32(1),
+		__cpu_to_le32(5),
 	},
 
 	/* (Additions here) */
@@ -452,8 +465,12 @@
 	int curr_delay = 5;
 
 	while (tot_delay < PCIE_WAKE_TIMEOUT) {
-		if (ath10k_pci_is_awake(ar))
+		if (ath10k_pci_is_awake(ar)) {
+			if (tot_delay > PCIE_WAKE_LATE_US)
+				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
+					    tot_delay / 1000);
 			return 0;
+		}
 
 		udelay(curr_delay);
 		tot_delay += curr_delay;
@@ -465,12 +482,53 @@
 	return -ETIMEDOUT;
 }
 
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	if (!ar_pci->ps_awake) {
+		iowrite32(PCIE_SOC_WAKE_V_MASK,
+			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+			  PCIE_SOC_WAKE_ADDRESS);
+
+		ret = ath10k_pci_wake_wait(ar);
+		if (ret == 0)
+			ar_pci->ps_awake = true;
+	}
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+	return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+	iowrite32(PCIE_SOC_WAKE_RESET,
+		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+		  PCIE_SOC_WAKE_ADDRESS);
+	ar_pci->ps_awake = false;
+
+	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
 static int ath10k_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	unsigned long flags;
 	int ret = 0;
 
+	if (ar_pci->pci_ps == 0)
+		return ret;
+
 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 
 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
@@ -502,6 +560,9 @@
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	unsigned long flags;
 
+	if (ar_pci->pci_ps == 0)
+		return;
+
 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
 
 	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
@@ -544,6 +605,11 @@
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	unsigned long flags;
 
+	if (ar_pci->pci_ps == 0) {
+		ath10k_pci_force_sleep(ar);
+		return;
+	}
+
 	del_timer_sync(&ar_pci->ps_timer);
 
 	spin_lock_irqsave(&ar_pci->ps_lock, flags);
@@ -682,8 +748,6 @@
 	dma_addr_t paddr;
 	int ret;
 
-	lockdep_assert_held(&ar_pci->ce_lock);
-
 	skb = dev_alloc_skb(pipe->buf_sz);
 	if (!skb)
 		return -ENOMEM;
@@ -701,9 +765,10 @@
 
 	ATH10K_SKB_RXCB(skb)->paddr = paddr;
 
+	spin_lock_bh(&ar_pci->ce_lock);
 	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+	spin_unlock_bh(&ar_pci->ce_lock);
 	if (ret) {
-		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
 				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
@@ -713,25 +778,27 @@
 	return 0;
 }
 
-static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 {
 	struct ath10k *ar = pipe->hif_ce_state;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 	int ret, num;
 
-	lockdep_assert_held(&ar_pci->ce_lock);
-
 	if (pipe->buf_sz == 0)
 		return;
 
 	if (!ce_pipe->dest_ring)
 		return;
 
+	spin_lock_bh(&ar_pci->ce_lock);
 	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+	spin_unlock_bh(&ar_pci->ce_lock);
 	while (num--) {
 		ret = __ath10k_pci_rx_post_buf(pipe);
 		if (ret) {
+			if (ret == -ENOSPC)
+				break;
 			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 			mod_timer(&ar_pci->rx_post_retry, jiffies +
 				  ATH10K_PCI_RX_POST_RETRY_MS);
@@ -740,25 +807,13 @@
 	}
 }
 
-static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
-{
-	struct ath10k *ar = pipe->hif_ce_state;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	spin_lock_bh(&ar_pci->ce_lock);
-	__ath10k_pci_rx_post_pipe(pipe);
-	spin_unlock_bh(&ar_pci->ce_lock);
-}
-
 static void ath10k_pci_rx_post(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int i;
 
-	spin_lock_bh(&ar_pci->ce_lock);
 	for (i = 0; i < CE_COUNT; i++)
-		__ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
-	spin_unlock_bh(&ar_pci->ce_lock);
+		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 }
 
 static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
@@ -775,6 +830,7 @@
 	switch (ar->hw_rev) {
 	case ATH10K_HW_QCA988X:
 	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
 		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 					  CORE_CTRL_ADDRESS) &
 		       0x7ff) << 21;
@@ -858,9 +914,8 @@
 			goto done;
 
 		i = 0;
-		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id) != 0) {
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
 			mdelay(1);
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 				ret = -EBUSY;
@@ -868,16 +923,6 @@
 			}
 		}
 
-		if (nbytes != completed_nbytes) {
-			ret = -EIO;
-			goto done;
-		}
-
-		if (buf != (u32)address) {
-			ret = -EIO;
-			goto done;
-		}
-
 		i = 0;
 		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
 							    &completed_nbytes,
@@ -1031,9 +1076,8 @@
 			goto done;
 
 		i = 0;
-		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-							    &completed_nbytes,
-							    &id) != 0) {
+		while (ath10k_ce_completed_send_next_nolock(ce_diag,
+							    NULL) != 0) {
 			mdelay(1);
 
 			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1042,16 +1086,6 @@
 			}
 		}
 
-		if (nbytes != completed_nbytes) {
-			ret = -EIO;
-			goto done;
-		}
-
-		if (buf != ce_data) {
-			ret = -EIO;
-			goto done;
-		}
-
 		i = 0;
 		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
 							    &completed_nbytes,
@@ -1102,20 +1136,14 @@
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
 	struct sk_buff_head list;
 	struct sk_buff *skb;
-	u32 ce_data;
-	unsigned int nbytes;
-	unsigned int transfer_id;
 
 	__skb_queue_head_init(&list);
-	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-					     &nbytes, &transfer_id) == 0) {
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
 		/* no need to call tx completion for NULL pointers */
 		if (skb == NULL)
 			continue;
@@ -1124,16 +1152,16 @@
 	}
 
 	while ((skb = __skb_dequeue(&list)))
-		cb->tx_completion(ar, skb);
+		ath10k_htc_tx_completion_handler(ar, skb);
 }
 
-/* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+				     void (*callback)(struct ath10k *ar,
+						      struct sk_buff *skb))
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
-	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
 	struct sk_buff *skb;
 	struct sk_buff_head list;
 	void *transfer_context;
@@ -1168,12 +1196,52 @@
 		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
 				skb->data, skb->len);
 
-		cb->rx_completion(ar, skb);
+		callback(ar, skb);
 	}
 
 	ath10k_pci_rx_post_pipe(pipe_info);
 }
 
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	struct ath10k *ar = ce_state->ar;
+	struct sk_buff *skb;
+
+	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		/* no need to call tx completion for NULL pointers */
+		if (!skb)
+			continue;
+
+		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+				 skb->len, DMA_TO_DEVICE);
+		ath10k_htt_hif_tx_complete(ar, skb);
+	}
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+	ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+	/* CE4 polling needs to be done whenever the CE pipe that transports
+	 * HTT Rx (target->host) is processed.
+	 */
+	ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+	ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
 				struct ath10k_hif_sg_item *items, int n_items)
 {
@@ -1343,17 +1411,6 @@
 	ath10k_ce_per_engine_service(ar, pipe);
 }
 
-static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
-					 struct ath10k_hif_cb *callbacks)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
-
-	memcpy(&ar_pci->msg_callbacks_current, callbacks,
-	       sizeof(ar_pci->msg_callbacks_current));
-}
-
 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1368,10 +1425,8 @@
 	del_timer_sync(&ar_pci->rx_post_retry);
 }
 
-static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
-					      u16 service_id, u8 *ul_pipe,
-					      u8 *dl_pipe, int *ul_is_polled,
-					      int *dl_is_polled)
+static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+					      u8 *ul_pipe, u8 *dl_pipe)
 {
 	const struct service_to_pipe *entry;
 	bool ul_set = false, dl_set = false;
@@ -1379,9 +1434,6 @@
 
 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
 
-	/* polling for received messages not supported */
-	*dl_is_polled = 0;
-
 	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
 		entry = &target_service_to_ce_map_wlan[i];
 
@@ -1415,25 +1467,17 @@
 	if (WARN_ON(!ul_set || !dl_set))
 		return -ENOENT;
 
-	*ul_is_polled =
-		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
-
 	return 0;
 }
 
 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
 					    u8 *ul_pipe, u8 *dl_pipe)
 {
-	int ul_is_polled, dl_is_polled;
-
 	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
 
 	(void)ath10k_pci_hif_map_service_to_pipe(ar,
 						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
-						 ul_pipe,
-						 dl_pipe,
-						 &ul_is_polled,
-						 &dl_is_polled);
+						 ul_pipe, dl_pipe);
 }
 
 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
@@ -1443,6 +1487,7 @@
 	switch (ar->hw_rev) {
 	case ATH10K_HW_QCA988X:
 	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
 		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 					CORE_CTRL_ADDRESS);
 		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
@@ -1464,6 +1509,7 @@
 	switch (ar->hw_rev) {
 	case ATH10K_HW_QCA988X:
 	case ATH10K_HW_QCA6174:
+	case ATH10K_HW_QCA9377:
 		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 					CORE_CTRL_ADDRESS);
 		val |= CORE_CTRL_PCIE_REG_31_MASK;
@@ -1504,6 +1550,7 @@
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
 	ath10k_pci_irq_enable(ar);
@@ -1553,7 +1600,6 @@
 	struct ath10k_pci *ar_pci;
 	struct ath10k_ce_pipe *ce_pipe;
 	struct ath10k_ce_ring *ce_ring;
-	struct ce_desc *ce_desc;
 	struct sk_buff *skb;
 	int i;
 
@@ -1568,10 +1614,6 @@
 	if (!pci_pipe->buf_sz)
 		return;
 
-	ce_desc = ce_ring->shadow_base;
-	if (WARN_ON(!ce_desc))
-		return;
-
 	for (i = 0; i < ce_ring->nentries; i++) {
 		skb = ce_ring->per_transfer_context[i];
 		if (!skb)
@@ -1579,7 +1621,7 @@
 
 		ce_ring->per_transfer_context[i] = NULL;
 
-		ar_pci->msg_callbacks_current.tx_completion(ar, skb);
+		ath10k_htc_tx_completion_handler(ar, skb);
 	}
 }
 
@@ -1745,12 +1787,8 @@
 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
 	struct bmi_xfer *xfer;
-	u32 ce_data;
-	unsigned int nbytes;
-	unsigned int transfer_id;
 
-	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
-					  &nbytes, &transfer_id))
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
 		return;
 
 	xfer->tx_done = true;
@@ -1840,6 +1878,8 @@
 			return 9;
 		}
 		break;
+	case QCA9377_1_0_DEVICE_ID:
+		return 2;
 	}
 
 	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
@@ -1999,9 +2039,7 @@
 		pipe->pipe_num = i;
 		pipe->hif_ce_state = ar;
 
-		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
-					   ath10k_pci_ce_send_done,
-					   ath10k_pci_ce_recv_data);
+		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
 		if (ret) {
 			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
 				   i, ret);
@@ -2257,7 +2295,7 @@
 	ret = ath10k_pci_wait_for_target_init(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
-				ret);
+			    ret);
 		return ret;
 	}
 
@@ -2302,6 +2340,8 @@
 		return ath10k_pci_qca988x_chip_reset(ar);
 	else if (QCA_REV_6174(ar))
 		return ath10k_pci_qca6174_chip_reset(ar);
+	else if (QCA_REV_9377(ar))
+		return ath10k_pci_qca6174_chip_reset(ar);
 	else if (QCA_REV_99X0(ar))
 		return ath10k_pci_qca99x0_chip_reset(ar);
 	else
@@ -2397,6 +2437,15 @@
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct pci_dev *pdev = ar_pci->pdev;
 	u32 val;
+	int ret = 0;
+
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_err(ar, "failed to wake up target: %d\n", ret);
+			return ret;
+		}
+	}
 
 	/* Suspend/Resume resets the PCI configuration space, so we have to
 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -2407,7 +2456,7 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	return 0;
+	return ret;
 }
 #endif
 
@@ -2421,7 +2470,6 @@
 	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
 	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
 	.send_complete_check	= ath10k_pci_hif_send_complete_check,
-	.set_callbacks		= ath10k_pci_hif_set_callbacks,
 	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
 	.power_up		= ath10k_pci_hif_power_up,
 	.power_down		= ath10k_pci_hif_power_down,
@@ -2501,6 +2549,16 @@
 {
 	struct ath10k *ar = arg;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	int ret;
+
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to wake device up on irq: %d\n",
+				    ret);
+			return IRQ_NONE;
+		}
+	}
 
 	if (ar_pci->num_msi_intrs == 0) {
 		if (!ath10k_pci_irq_pending(ar))
@@ -2609,12 +2667,9 @@
 		return ath10k_pci_request_irq_legacy(ar);
 	case 1:
 		return ath10k_pci_request_irq_msi(ar);
-	case MSI_NUM_REQUEST:
+	default:
 		return ath10k_pci_request_irq_msix(ar);
 	}
-
-	ath10k_warn(ar, "unknown irq configuration upon request\n");
-	return -EINVAL;
 }
 
 static void ath10k_pci_free_irq(struct ath10k *ar)
@@ -2657,7 +2712,7 @@
 
 	/* Try MSI-X */
 	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
-		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
+		ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
 		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
 					   ar_pci->num_msi_intrs);
 		if (ret > 0)
@@ -2705,18 +2760,13 @@
 	switch (ar_pci->num_msi_intrs) {
 	case 0:
 		ath10k_pci_deinit_irq_legacy(ar);
-		return 0;
-	case 1:
-		/* fall-through */
-	case MSI_NUM_REQUEST:
-		pci_disable_msi(ar_pci->pdev);
-		return 0;
+		break;
 	default:
 		pci_disable_msi(ar_pci->pdev);
+		break;
 	}
 
-	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
-	return -EINVAL;
+	return 0;
 }
 
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
@@ -2908,17 +2958,25 @@
 	struct ath10k_pci *ar_pci;
 	enum ath10k_hw_rev hw_rev;
 	u32 chip_id;
+	bool pci_ps;
 
 	switch (pci_dev->device) {
 	case QCA988X_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA988X;
+		pci_ps = false;
 		break;
 	case QCA6164_2_1_DEVICE_ID:
 	case QCA6174_2_1_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA6174;
+		pci_ps = true;
 		break;
 	case QCA99X0_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA99X0;
+		pci_ps = false;
+		break;
+	case QCA9377_1_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA9377;
+		pci_ps = true;
 		break;
 	default:
 		WARN_ON(1);
@@ -2932,19 +2990,21 @@
 		return -ENOMEM;
 	}
 
-	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+		   pdev->vendor, pdev->device,
+		   pdev->subsystem_vendor, pdev->subsystem_device);
 
 	ar_pci = ath10k_pci_priv(ar);
 	ar_pci->pdev = pdev;
 	ar_pci->dev = &pdev->dev;
 	ar_pci->ar = ar;
 	ar->dev_id = pci_dev->device;
+	ar_pci->pci_ps = pci_ps;
 
-	if (pdev->subsystem_vendor || pdev->subsystem_device)
-		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
-			  "%04x:%04x:%04x:%04x",
-			  pdev->vendor, pdev->device,
-			  pdev->subsystem_vendor, pdev->subsystem_device);
+	ar->id.vendor = pdev->vendor;
+	ar->id.device = pdev->device;
+	ar->id.subsystem_vendor = pdev->subsystem_vendor;
+	ar->id.subsystem_device = pdev->subsystem_device;
 
 	spin_lock_init(&ar_pci->ce_lock);
 	spin_lock_init(&ar_pci->ps_lock);
@@ -2970,6 +3030,14 @@
 	ath10k_pci_ce_deinit(ar);
 	ath10k_pci_irq_disable(ar);
 
+	if (ar_pci->pci_ps == 0) {
+		ret = ath10k_pci_force_wake(ar);
+		if (ret) {
+			ath10k_warn(ar, "failed to wake up device: %d\n", ret);
+			goto err_free_pipes;
+		}
+	}
+
 	ret = ath10k_pci_init_irq(ar);
 	if (ret) {
 		ath10k_err(ar, "failed to init irqs: %d\n", ret);
@@ -3098,13 +3166,20 @@
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
 
 /* QCA6174 2.1 firmware files */
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
 
 /* QCA6174 3.1 firmware files */
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9377 1.0 firmware files */
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
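
Note on the pci.c changes: TX/RX completion handling moves from the
single global callback pair (the removed msg_callbacks_current) to
per-copy-engine send_cb/recv_cb hooks declared right in
host_ce_config_wlan, CE5 is repurposed from unused to a target->host
HTT pipe, and chips without PCI power save (pci_ps == 0) get explicit
force-wake/force-sleep helpers. A rough userspace sketch of the
per-pipe dispatch idea; the types and handler names here are invented
for illustration:

	#include <stdio.h>

	struct pipe;
	typedef void (*pipe_cb)(struct pipe *p);

	struct pipe {
		int id;
		pipe_cb send_cb;	/* NULL: pipe has no host->target side */
		pipe_cb recv_cb;	/* NULL: pipe has no target->host side */
	};

	static void htc_tx_done(struct pipe *p)
	{
		printf("tx completions serviced on CE%d\n", p->id);
	}

	static void htt_rx_ready(struct pipe *p)
	{
		printf("rx buffers drained on CE%d\n", p->id);
	}

	/* Per-pipe callback table in the spirit of host_ce_config_wlan:
	 * each copy engine names its own handlers instead of funnelling
	 * everything through one global callback pair. */
	static struct pipe pipes[] = {
		{ .id = 0, .send_cb = htc_tx_done },
		{ .id = 5, .recv_cb = htt_rx_ready },
	};

	int main(void)
	{
		for (unsigned i = 0; i < sizeof(pipes) / sizeof(pipes[0]); i++) {
			if (pipes[i].send_cb)
				pipes[i].send_cb(&pipes[i]);
			if (pipes[i].recv_cb)
				pipes[i].recv_cb(&pipes[i]);
		}
		return 0;
	}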
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 8d364fb..f91bf33 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -175,8 +175,6 @@
 
 	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
-	struct ath10k_hif_cb msg_callbacks_current;
-
 	/* Copy Engine used for Diagnostic Accesses */
 	struct ath10k_ce_pipe *ce_diag;
 
@@ -221,6 +219,12 @@
 	 * powersave register state changes.
 	 */
 	bool ps_awake;
+
+	/* pci power save, disabled for QCA988X and QCA99X0.
+	 * Writing 'false' to this variable avoids frequent locking
+	 * on MMIO read/write.
+	 */
+	bool pci_ps;
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -230,7 +234,8 @@
 
 #define ATH10K_PCI_RX_POST_RETRY_MS 50
 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
-#define PCIE_WAKE_TIMEOUT 10000	/* 10ms */
+#define PCIE_WAKE_TIMEOUT 30000	/* 30ms */
+#define PCIE_WAKE_LATE_US 10000	/* 10ms */
 
 #define BAR_NUM 0
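
The pci_ps flag added above lets the wake/sleep paths return early for
chips that are always awake, so the ps_lock and refcounting are never
touched on the MMIO fast path. A toy sketch of that guard, assuming a
made-up dev_ps structure:

	#include <stdbool.h>
	#include <stdio.h>

	struct dev_ps {
		bool ps_supported;		/* plays the role of the pci_ps flag */
		unsigned long wake_refcount;	/* guarded by a spinlock in the driver */
	};

	static int dev_wake(struct dev_ps *d)
	{
		/* Fast path: a chip without PCI power save is always awake,
		 * so skip the refcount and the lock that would protect it. */
		if (!d->ps_supported)
			return 0;

		d->wake_refcount++;
		return 0;
	}

	int main(void)
	{
		struct dev_ps qca988x = { .ps_supported = false };
		struct dev_ps qca6174 = { .ps_supported = true };

		dev_wake(&qca988x);
		dev_wake(&qca6174);
		printf("refcounts: %lu %lu\n",
		       qca988x.wake_refcount, qca6174.wake_refcount);
		return 0;
	}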
 
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 768bef6..05a421b 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -450,6 +450,9 @@
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
+#define QCA9377_BOARD_DATA_SZ     QCA6174_BOARD_DATA_SZ
+#define QCA9377_BOARD_EXT_DATA_SZ 0
+
 #define QCA99X0_BOARD_DATA_SZ	  12288
 #define QCA99X0_BOARD_EXT_DATA_SZ 0
 
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index b084f88..1d5a2fd 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -139,11 +139,181 @@
 	return cfg80211_testmode_reply(skb);
 }
 
-static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
+{
+	size_t len, magic_len, ie_len;
+	struct ath10k_fw_ie *hdr;
+	char filename[100];
+	__le32 *version;
+	const u8 *data;
+	int ie_id, ret;
+
+	snprintf(filename, sizeof(filename), "%s/%s",
+		 ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
+
+	/* load utf firmware image */
+	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	if (ret) {
+		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+			    filename, ret);
+		return ret;
+	}
+
+	data = ar->testmode.utf->data;
+	len = ar->testmode.utf->size;
+
+	/* magic also includes the null byte, check that as well */
+	magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+	if (len < magic_len) {
+		ath10k_err(ar, "utf firmware file is too small to contain magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+		ath10k_err(ar, "invalid firmware magic\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* jump over the padding */
+	magic_len = ALIGN(magic_len, 4);
+
+	len -= magic_len;
+	data += magic_len;
+
+	/* loop elements */
+	while (len > sizeof(struct ath10k_fw_ie)) {
+		hdr = (struct ath10k_fw_ie *)data;
+
+		ie_id = le32_to_cpu(hdr->id);
+		ie_len = le32_to_cpu(hdr->len);
+
+		len -= sizeof(*hdr);
+		data += sizeof(*hdr);
+
+		if (len < ie_len) {
+			ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+				   ie_id, len, ie_len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		switch (ie_id) {
+		case ATH10K_FW_IE_FW_VERSION:
+			if (ie_len > sizeof(ar->testmode.utf_version) - 1)
+				break;
+
+			memcpy(ar->testmode.utf_version, data, ie_len);
+			ar->testmode.utf_version[ie_len] = '\0';
+
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+				   "testmode found fw utf version %s\n",
+				   ar->testmode.utf_version);
+			break;
+		case ATH10K_FW_IE_TIMESTAMP:
+			/* ignore timestamp, but don't warn about it either */
+			break;
+		case ATH10K_FW_IE_FW_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+				   "testmode found fw image ie (%zd B)\n",
+				   ie_len);
+
+			ar->testmode.utf_firmware_data = data;
+			ar->testmode.utf_firmware_len = ie_len;
+			break;
+		case ATH10K_FW_IE_WMI_OP_VERSION:
+			if (ie_len != sizeof(u32))
+				break;
+			version = (__le32 *)data;
+			ar->testmode.op_version = le32_to_cpup(version);
+			ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
+				   ar->testmode.op_version);
+			break;
+		default:
+			ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
+				    le32_to_cpu(hdr->id));
+			break;
+		}
+		/* jump over the padding */
+		ie_len = ALIGN(ie_len, 4);
+
+		len -= ie_len;
+		data += ie_len;
+	}
+
+	if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
+		ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	release_firmware(ar->testmode.utf);
+
+	return ret;
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
 {
 	char filename[100];
 	int ret;
 
+	snprintf(filename, sizeof(filename), "%s/%s",
+		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+	/* load utf firmware image */
+	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	if (ret) {
+		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+			    filename, ret);
+		return ret;
+	}
+
+	/* FW UTF API 1 ("utf.bin") does not advertise firmware features.
+	 * Do an ugly hack where we force the firmware features to match
+	 * the 10.1 branch so that wmi.c will use the correct WMI
+	 * interface.
+	 */
+
+	ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+	ar->testmode.utf_firmware_data = ar->testmode.utf->data;
+	ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+
+	return 0;
+}
+
+static int ath10k_tm_fetch_firmware(struct ath10k *ar)
+{
+	int ret;
+
+	ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+	if (ret == 0) {
+		ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2\n");
+		return 0;
+	}
+
+	ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to fetch utf firmware binary: %d\n", ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1\n");
+
+	return 0;
+}
+
+static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+{
+	const char *ver;
+	int ret;
+
 	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
 
 	mutex_lock(&ar->conf_mutex);
@@ -165,36 +335,27 @@
 		goto err;
 	}
 
-	snprintf(filename, sizeof(filename), "%s/%s",
-		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
-
-	/* load utf firmware image */
-	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+	ret = ath10k_tm_fetch_firmware(ar);
 	if (ret) {
-		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
-			    filename, ret);
+		ath10k_err(ar, "failed to fetch UTF firmware: %d\n", ret);
 		goto err;
 	}
 
 	spin_lock_bh(&ar->data_lock);
-
 	ar->testmode.utf_monitor = true;
-
 	spin_unlock_bh(&ar->data_lock);
-
 	BUILD_BUG_ON(sizeof(ar->fw_features) !=
 		     sizeof(ar->testmode.orig_fw_features));
 
 	memcpy(ar->testmode.orig_fw_features, ar->fw_features,
 	       sizeof(ar->fw_features));
 	ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
-
-	/* utf.bin firmware image does not advertise firmware features. Do
-	 * an ugly hack where we force the firmware features so that wmi.c
-	 * will use the correct WMI interface.
-	 */
 	memset(ar->fw_features, 0, sizeof(ar->fw_features));
-	ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+
+	ar->wmi.op_version = ar->testmode.op_version;
+
+	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
+		   ar->wmi.op_version);
 
 	ret = ath10k_hif_power_up(ar);
 	if (ret) {
@@ -212,7 +373,12 @@
 
 	ar->state = ATH10K_STATE_UTF;
 
-	ath10k_info(ar, "UTF firmware started\n");
+	if (strlen(ar->testmode.utf_version) > 0)
+		ver = ar->testmode.utf_version;
+	else
+		ver = "API 1";
+
+	ath10k_info(ar, "UTF firmware %s started\n", ver);
 
 	mutex_unlock(&ar->conf_mutex);
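
The API 2 loader added above walks a sequence of {id, len} elements,
each payload padded to a 4-byte boundary, and picks up the UTF version,
firmware image pointer and WMI op version as it goes. A minimal
userspace walker in the same spirit; the struct and helper names are
hypothetical and a little-endian host is assumed, matching the file
format:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ALIGN4(x) (((x) + 3u) & ~3u)

	struct fw_ie {		/* same shape as the on-disk {id, len} header */
		uint32_t id;
		uint32_t len;
	};

	static void walk_ies(const uint8_t *data, size_t len)
	{
		while (len > sizeof(struct fw_ie)) {
			struct fw_ie hdr;
			uint32_t padded;

			memcpy(&hdr, data, sizeof(hdr));	/* no unaligned loads */
			data += sizeof(hdr);
			len -= sizeof(hdr);

			if (hdr.len > len) {
				fprintf(stderr, "truncated IE %u\n",
					(unsigned)hdr.id);
				return;
			}

			printf("IE id %u, %u payload bytes\n",
			       (unsigned)hdr.id, (unsigned)hdr.len);

			/* skip payload plus padding, as the API 2 loader does */
			padded = ALIGN4(hdr.len);
			if (padded > len)
				padded = len;	/* final IE may omit the padding */
			data += padded;
			len -= padded;
		}
	}

	int main(void)
	{
		/* one IE: id 1, 3 payload bytes, padded to 4 */
		uint8_t buf[12] = { 1, 0, 0, 0, 3, 0, 0, 0, 'a', 'b', 'c', 0 };

		walk_ies(buf, sizeof(buf));
		return 0;
	}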
 
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 1a899d7..60fe562 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -215,6 +215,6 @@
 
 void ath10k_thermal_unregister(struct ath10k *ar)
 {
-	thermal_cooling_device_unregister(ar->thermal.cdev);
 	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+	thermal_cooling_device_unregister(ar->thermal.cdev);
 }
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index e4a9c4c..6d1105a 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -52,6 +52,9 @@
 	struct ieee80211_tx_info *info;
 	struct ath10k_skb_cb *skb_cb;
 	struct sk_buff *msdu;
+	struct ieee80211_hdr *hdr;
+	__le16 fc;
+	bool limit_mgmt_desc = false;
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
 		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
@@ -72,21 +75,23 @@
 		spin_unlock_bh(&htt->tx_lock);
 		return;
 	}
+
+	hdr = (struct ieee80211_hdr *)msdu->data;
+	fc = hdr->frame_control;
+
+	if (unlikely(ieee80211_is_mgmt(fc)) &&
+	    ar->hw_params.max_probe_resp_desc_thres)
+		limit_mgmt_desc = true;
+
 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	__ath10k_htt_tx_dec_pending(htt);
+	__ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
 	if (htt->num_pending_tx == 0)
 		wake_up(&htt->empty_tx_wq);
 	spin_unlock_bh(&htt->tx_lock);
 
 	skb_cb = ATH10K_SKB_CB(msdu);
-
 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-	if (skb_cb->htt.txbuf)
-		dma_pool_free(htt->tx_pool,
-			      skb_cb->htt.txbuf,
-			      skb_cb->htt.txbuf_paddr);
-
 	ath10k_report_offchan_tx(htt->ar, msdu);
 
 	info = IEEE80211_SKB_CB(msdu);
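
The txrx.c hunk above inspects frame_control so that the
management-descriptor quota (limit_mgmt_desc) is only applied when the
completed frame is a management frame and the hardware sets
max_probe_resp_desc_thres. For reference, a tiny sketch of the
frame-type test that mac80211's ieee80211_is_mgmt() performs; the
constants mirror IEEE80211_FCTL_FTYPE and IEEE80211_FTYPE_MGMT, and the
helper name is made up:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* 802.11 frame_control type field lives in bits 2..3;
	 * management frames have type 00. */
	#define FCTL_FTYPE 0x000c
	#define FTYPE_MGMT 0x0000

	static bool is_mgmt(uint16_t fc)	/* fc in host order here */
	{
		return (fc & FCTL_FTYPE) == FTYPE_MGMT;
	}

	int main(void)
	{
		uint16_t probe_resp = 0x0050;	/* type 00 (mgmt), subtype 0101 */
		uint16_t qos_data   = 0x0088;	/* type 10 (data), subtype 1000 */

		printf("probe resp mgmt? %d\n", is_mgmt(probe_resp));
		printf("qos data mgmt? %d\n", is_mgmt(qos_data));
		return 0;
	}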
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 248ffc3..8f4f6a8 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -177,6 +177,15 @@
 						const struct wmi_tdls_peer_capab_arg *cap,
 						const struct wmi_channel_arg *chan);
 	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+						   u32 param);
+	void (*fw_stats_fill)(struct ath10k *ar,
+			      struct ath10k_fw_stats *fw_stats,
+			      char *buf);
+	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
+							u8 enable,
+							u32 detect_level,
+							u32 detect_margin);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1270,4 +1279,52 @@
 	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
 }
 
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+			 char *buf)
+{
+	if (!ar->wmi.ops->fw_stats_fill)
+		return -EOPNOTSUPP;
+
+	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+	return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+				    u32 detect_level, u32 detect_margin)
+{
+	struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
+		return -EOPNOTSUPP;
+
+	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
+							detect_level,
+							detect_margin);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	return ath10k_wmi_cmd_send(ar, skb,
+				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
+}
+
 #endif
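
The new wmi-ops.h wrappers follow the file's existing pattern: probe
the op pointer, return -EOPNOTSUPP when the firmware branch does not
implement the command, otherwise build the skb and send it. A
stripped-down userspace sketch of that indirection, with invented
names:

	#include <errno.h>
	#include <stdio.h>

	struct wmi_ops {
		/* optional op: NULL means this firmware branch lacks it */
		int (*get_tpc_config)(int param);
	};

	static int wmi_get_tpc_config(const struct wmi_ops *ops, int param)
	{
		if (!ops->get_tpc_config)
			return -EOPNOTSUPP;	/* op not implemented */

		return ops->get_tpc_config(param);
	}

	static int real_get_tpc_config(int param)
	{
		printf("tpc config requested, param %d\n", param);
		return 0;
	}

	int main(void)
	{
		struct wmi_ops with = { .get_tpc_config = real_get_tpc_config };
		struct wmi_ops without = { 0 };

		printf("with op: %d\n", wmi_get_tpc_config(&with, 1));
		printf("without op: %d\n", wmi_get_tpc_config(&without, 1));
		return 0;
	}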
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index b5849b3..6fbd17b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -23,6 +23,7 @@
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
 #include "p2p.h"
+#include "testmode.h"
 
 /***************/
 /* TLV helpers */
@@ -419,6 +420,7 @@
 {
 	struct wmi_cmd_hdr *cmd_hdr;
 	enum wmi_tlv_event_id id;
+	bool consumed;
 
 	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
 	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -428,6 +430,18 @@
 
 	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
 
+	consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+	/* The ready event must be handled normally in UTF mode as well so
+	 * that we know the UTF firmware has booted; all other WMI events
+	 * are simply passed through to testmode.
+	 */
+	if (consumed && id != WMI_TLV_READY_EVENTID) {
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "wmi tlv testmode consumed 0x%x\n", id);
+		goto out;
+	}
+
 	switch (id) {
 	case WMI_TLV_MGMT_RX_EVENTID:
 		ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -3468,6 +3482,7 @@
 	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
 	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
 	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
 };
 
 /************/
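
The wmi-tlv.c hunk above routes every event through testmode first and
only falls back to normal handling when testmode did not consume it,
with WMI_TLV_READY_EVENTID always handled normally. A toy sketch of
that consume-except-ready rule; the event IDs and the stub are
illustrative only:

	#include <stdbool.h>
	#include <stdio.h>

	#define EV_READY 0x1	/* illustrative IDs, not the real WMI values */
	#define EV_STATS 0x2

	static bool testmode_consume(int id)
	{
		(void)id;	/* stub: pretend UTF mode consumed it */
		return true;
	}

	static void dispatch(int id)
	{
		/* Testmode may swallow any event except READY, which the
		 * normal path must still see so the driver learns that the
		 * UTF firmware booted. */
		if (testmode_consume(id) && id != EV_READY) {
			printf("event 0x%x consumed by testmode\n", id);
			return;
		}

		printf("event 0x%x handled normally\n", id);
	}

	int main(void)
	{
		dispatch(EV_STATS);
		dispatch(EV_READY);
		return 0;
	}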
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ce01107..7569db0 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -148,6 +148,7 @@
 	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
@@ -313,6 +314,7 @@
 	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
@@ -477,6 +479,7 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
@@ -1407,6 +1410,7 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
 	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
 	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
@@ -2475,6 +2479,47 @@
 	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
 }
 
+static void
+ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
+				   struct ath10k_fw_stats_pdev *dst)
+{
+	dst->comp_queued = __le32_to_cpu(src->comp_queued);
+	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+	dst->local_enqued = __le32_to_cpu(src->local_enqued);
+	dst->local_freed = __le32_to_cpu(src->local_freed);
+	dst->hw_queued = __le32_to_cpu(src->hw_queued);
+	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+	dst->underrun = __le32_to_cpu(src->underrun);
+	dst->tx_abort = __le32_to_cpu(src->tx_abort);
+	dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+	dst->tx_ko = __le32_to_cpu(src->tx_ko);
+	dst->data_rc = __le32_to_cpu(src->data_rc);
+	dst->self_triggers = __le32_to_cpu(src->self_triggers);
+	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+	dst->hw_paused = __le32_to_cpu(src->hw_paused);
+	dst->seq_posted = __le32_to_cpu(src->seq_posted);
+	dst->seq_failed_queueing =
+		__le32_to_cpu(src->seq_failed_queueing);
+	dst->seq_completed = __le32_to_cpu(src->seq_completed);
+	dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
+	dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
+	dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
+	dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
+	dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
+	dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
+	dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
+}
+
 void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
 				   struct ath10k_fw_stats_pdev *dst)
 {
@@ -2785,6 +2830,86 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct ath10k_fw_stats *stats)
+{
+	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+	u32 num_pdev_stats;
+	u32 num_pdev_ext_stats;
+	u32 num_vdev_stats;
+	u32 num_peer_stats;
+	int i;
+
+	if (!skb_pull(skb, sizeof(*ev)))
+		return -EPROTO;
+
+	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+	for (i = 0; i < num_pdev_stats; i++) {
+		const struct wmi_10_4_pdev_stats *src;
+		struct ath10k_fw_stats_pdev *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
+		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
+		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+	for (i = 0; i < num_pdev_ext_stats; i++) {
+		const struct wmi_10_2_pdev_ext_stats *src;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		/* FIXME: expose values to userspace
+		 *
+		 * Note: Even though this loop seems to do nothing, it is
+		 * required to parse the following sub-structures properly.
+		 */
+	}
+
+	/* fw doesn't implement vdev stats */
+
+	for (i = 0; i < num_peer_stats; i++) {
+		const struct wmi_10_4_peer_stats *src;
+		struct ath10k_fw_stats_peer *dst;
+
+		src = (void *)skb->data;
+		if (!skb_pull(skb, sizeof(*src)))
+			return -EPROTO;
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+		dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+		dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+		/* FIXME: expose 10.4 specific values */
+
+		list_add_tail(&dst->list, &stats->peers);
+	}
+
+	return 0;
+}
+
 void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
 {
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
@@ -3018,8 +3143,6 @@
 			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
 			       arvif->u.ap.noa_data,
 			       arvif->u.ap.noa_len);
-
-	return;
 }
 
 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -3507,7 +3630,7 @@
 							  tsf);
 			if (res < 0) {
 				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
-					    res);
+					   res);
 				return;
 			}
 			break;
@@ -3835,9 +3958,258 @@
 	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
 }
 
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+				     struct wmi_pdev_tpc_config_event *ev,
+				     u32 rate_idx, u32 num_chains,
+				     u32 rate_code, u8 type)
+{
+	u8 tpc, num_streams, preamble, ch, stm_idx;
+
+	num_streams = ATH10K_HW_NSS(rate_code);
+	preamble = ATH10K_HW_PREAMBLE(rate_code);
+	ch = num_chains - 1;
+
+	tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+	if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+		goto out;
+
+	if (preamble == WMI_RATE_PREAMBLE_CCK)
+		goto out;
+
+	stm_idx = num_streams - 1;
+	if (num_chains <= num_streams)
+		goto out;
+
+	switch (type) {
+	case WMI_TPC_TABLE_TYPE_STBC:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+		break;
+	case WMI_TPC_TABLE_TYPE_TXBF:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+		break;
+	case WMI_TPC_TABLE_TYPE_CDD:
+		tpc = min_t(u8, tpc,
+			    ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+		break;
+	default:
+		ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+		tpc = 0;
+		break;
+	}
+
+out:
+	return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+					  struct wmi_pdev_tpc_config_event *ev,
+					  struct ath10k_tpc_stats *tpc_stats,
+					  u8 *rate_code, u16 *pream_table, u8 type)
+{
+	u32 i, j, pream_idx, flags;
+	u8 tpc[WMI_TPC_TX_N_CHAIN];
+	char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+	char buff[WMI_TPC_BUF_SIZE];
+
+	flags = __le32_to_cpu(ev->flags);
+
+	switch (type) {
+	case WMI_TPC_TABLE_TYPE_CDD:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	case WMI_TPC_TABLE_TYPE_STBC:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	case WMI_TPC_TABLE_TYPE_TXBF:
+		if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+			ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+			tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+			return;
+		}
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "invalid table type in wmi tpc event: %d\n", type);
+		return;
+	}
+
+	pream_idx = 0;
+	for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
+		memset(tpc_value, 0, sizeof(tpc_value));
+		memset(buff, 0, sizeof(buff));
+		if (i == pream_table[pream_idx])
+			pream_idx++;
+
+		for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+			if (j >= __le32_to_cpu(ev->num_tx_chain))
+				break;
+
+			tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+							    rate_code[i],
+							    type);
+			snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+			strncat(tpc_value, buff, strlen(buff));
+		}
+		tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+		tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+		memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+		       tpc_value, sizeof(tpc_value));
+	}
+}
+
 void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
 {
-	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+	u32 i, j, pream_idx, num_tx_chain;
+	u8 rate_code[WMI_TPC_RATE_MAX], rate_idx;
+	u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+	struct wmi_pdev_tpc_config_event *ev;
+	struct ath10k_tpc_stats *tpc_stats;
+
+	ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+	tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+	if (!tpc_stats)
+		return;
+
+	/* Create the rate code table based on the chains supported */
+	rate_idx = 0;
+	pream_idx = 0;
+
+	/* Fill CCK rate code */
+	for (i = 0; i < 4; i++) {
+		rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+		rate_idx++;
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill OFDM rate code */
+	for (i = 0; i < 8; i++) {
+		rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+		rate_idx++;
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+	/* Fill HT20 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 8; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill HT40 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 8; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT20 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT40 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	/* Fill VHT80 rate code */
+	for (i = 0; i < num_tx_chain; i++) {
+		for (j = 0; j < 10; j++) {
+			rate_code[rate_idx] =
+			ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+			rate_idx++;
+		}
+	}
+	pream_table[pream_idx] = rate_idx;
+	pream_idx++;
+
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+	rate_code[rate_idx++] =
+		ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+	pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+
+	tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+	tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+	tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+	tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+	tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+	tpc_stats->twice_antenna_reduction =
+		__le32_to_cpu(ev->twice_antenna_reduction);
+	tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+	tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+	tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+	tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
+
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_CDD);
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_STBC);
+	ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+				      rate_code, pream_table,
+				      WMI_TPC_TABLE_TYPE_TXBF);
+
+	ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chains %d rates %d\n",
+		   __le32_to_cpu(ev->chan_freq),
+		   __le32_to_cpu(ev->phy_mode),
+		   __le32_to_cpu(ev->ctl),
+		   __le32_to_cpu(ev->reg_domain),
+		   a_sle32_to_cpu(ev->twice_antenna_gain),
+		   __le32_to_cpu(ev->twice_antenna_reduction),
+		   __le32_to_cpu(ev->power_limit),
+		   __le32_to_cpu(ev->twice_max_rd_power) / 2,
+		   __le32_to_cpu(ev->num_tx_chain),
+		   __le32_to_cpu(ev->rate_max));
 }
 
 void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
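
Note on the TPC event handler above: rate_code[] is filled section by
section (4 CCK and 8 OFDM entries, 8 HT entries per chain for HT20 and
HT40, 10 VHT entries per chain for 20/40/80 MHz, plus five trailing
legacy entries), and pream_table[] records the rate index where each
next preamble section starts, terminated by
ATH10K_TPC_PREAM_TABLE_END, so the display loop can advance pream_idx
at each boundary. A toy illustration of just that boundary
bookkeeping, with abbreviated section sizes:

	#include <stdio.h>

	#define TABLE_END 0xffff	/* stand-in for ATH10K_TPC_PREAM_TABLE_END */

	int main(void)
	{
		/* Each entry stores the rate index where the *next*
		 * preamble section begins, so the walker bumps its section
		 * counter when the running rate index crosses a boundary. */
		unsigned pream_table[4];
		unsigned pream_idx = 0, rate_idx = 0;

		rate_idx += 4;			/* 4 CCK rates */
		pream_table[pream_idx++] = rate_idx;

		rate_idx += 8;			/* 8 OFDM rates */
		pream_table[pream_idx++] = rate_idx;

		pream_table[pream_idx] = TABLE_END;

		for (unsigned i = 0, p = 0; i < rate_idx; i++) {
			if (i == pream_table[p])
				p++;	/* crossed into the next section */
			printf("rate %2u -> preamble section %u\n", i, p);
		}
		return 0;
	}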
@@ -3917,6 +4289,53 @@
 	return 0;
 }
 
+static bool
+ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
+				 const struct wlan_host_mem_req **mem_reqs,
+				 u32 num_mem_reqs)
+{
+	u32 req_id, num_units, unit_size, num_unit_info;
+	u32 pool_size;
+	int i, j;
+	bool found;
+
+	if (ar->wmi.num_mem_chunks != num_mem_reqs)
+		return false;
+
+	for (i = 0; i < num_mem_reqs; ++i) {
+		req_id = __le32_to_cpu(mem_reqs[i]->req_id);
+		num_units = __le32_to_cpu(mem_reqs[i]->num_units);
+		unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
+		num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
+
+		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+			if (ar->num_active_peers)
+				num_units = ar->num_active_peers + 1;
+			else
+				num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+			num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+			num_units = ar->max_num_vdevs + 1;
+		}
+
+		found = false;
+		for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
+			if (ar->wmi.mem_chunks[j].req_id == req_id) {
+				pool_size = num_units * round_up(unit_size, 4);
+				if (ar->wmi.mem_chunks[j].len == pool_size) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (!found)
+			return false;
+	}
+
+	return true;
+}
+
 static int
 ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
 				   struct wmi_svc_rdy_ev_arg *arg)
@@ -3997,6 +4416,7 @@
 	struct wmi_svc_rdy_ev_arg arg = {};
 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
 	int ret;
+	bool allocated;
 
 	if (!skb) {
 		ath10k_warn(ar, "invalid service ready event skb\n");
@@ -4040,8 +4460,10 @@
 		ar->num_rf_chains = ar->max_spatial_stream;
 	}
 
-	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
-	ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+	if (!ar->cfg_tx_chainmask) {
+		ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+		ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+	}
 
 	if (strlen(ar->hw->wiphy->fw_version) == 0) {
 		snprintf(ar->hw->wiphy->fw_version,
@@ -4073,6 +4495,18 @@
 	 * and WMI_SERVICE_IRAM_TIDS, etc.
 	 */
 
+	allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
+						     num_mem_reqs);
+	if (allocated)
+		goto skip_mem_alloc;
+
+	/* Either this event is received during boot time or there is a change
+	 * in memory requirement from firmware when compared to the last request.
+	 * Free any old memory and do a fresh allocation based on the current
+	 * memory requirement.
+	 */
+	ath10k_wmi_free_host_mem(ar);
+
 	for (i = 0; i < num_mem_reqs; ++i) {
 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
@@ -4108,6 +4542,7 @@
 			return;
 	}
 
+skip_mem_alloc:
 	ath10k_dbg(ar, ATH10K_DBG_WMI,
 		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
 		   __le32_to_cpu(arg.min_tx_power),
@@ -4623,6 +5058,9 @@
 		ath10k_dbg(ar, ATH10K_DBG_WMI,
 			   "received event id %d not implemented\n", id);
 		break;
+	case WMI_10_4_UPDATE_STATS_EVENTID:
+		ath10k_wmi_event_update_stats(ar, skb);
+		break;
 	default:
 		ath10k_warn(ar, "Unknown eventid: %d\n", id);
 		break;
@@ -5029,7 +5467,7 @@
 	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
 
-	config.rx_decap_mode	    = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+	config.rx_decap_mode	    = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
 	config.bmiss_offload_max_vdev =
 			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
@@ -6295,6 +6733,505 @@
 	return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+	struct wmi_pdev_get_tpc_config_cmd *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+	cmd->param = __cpu_to_le32(param);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev get tcp config param:%d\n", param);
+	return skb;
+}
+
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
+{
+	struct ath10k_fw_stats_peer *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+	struct ath10k_fw_stats_vdev *i;
+	size_t num = 0;
+
+	list_for_each_entry(i, head, list)
+		++num;
+
+	return num;
+}
+
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				   char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			"ath10k PDEV stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			"=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Channel noise floor", pdev->ch_noise_floor);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Channel TX power", pdev->chan_tx_power);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"TX frame count", pdev->tx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX frame count", pdev->rx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX clear count", pdev->rx_clear_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Cycle count", pdev->cycle_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"PHY error count", pdev->phy_err_count);
+
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				    char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RTS bad count", pdev->rts_bad);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RTS good count", pdev->rts_good);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"FCS bad count", pdev->fcs_bad);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"No beacon count", pdev->no_beacons);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"MIB int count", pdev->mib_int_count);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath10k PDEV TX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies queued", pdev->comp_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies disp.", pdev->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDU queued", pdev->msdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU queued", pdev->mpdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs dropped", pdev->wmm_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local enqued", pdev->local_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local freed", pdev->local_freed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW queued", pdev->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs reaped", pdev->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Num underruns", pdev->underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs cleaned", pdev->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requed", pdev->mpdus_requed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Excessive retries", pdev->tx_ko);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW rate", pdev->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Sched self tiggers", pdev->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Dropped due to SW retries",
+			 pdev->sw_retry_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Illegal rate phy errors",
+			 pdev->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Pdev continuous xretry", pdev->pdev_cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "TX timeout", pdev->pdev_tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PDEV resets", pdev->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY underrun", pdev->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU is more than txop limit", pdev->txop_ovf);
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath10k PDEV RX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Mid PPDU route change",
+			 pdev->mid_ppdu_route_change);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Tot. number of statuses", pdev->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 0", pdev->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 1", pdev->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 2", pdev->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 3", pdev->r3_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to HTT", pdev->htt_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to HTT", pdev->htt_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to stack", pdev->loc_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to stack", pdev->loc_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized AMSUs", pdev->oversize_amsdu);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors", pdev->phy_errs);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors drops", pdev->phy_err_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+			      char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"vdev id", vdev->vdev_id);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"beacon snr", vdev->beacon_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"data snr", vdev->data_snr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx frames", vdev->num_rx_frames);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rts fail", vdev->num_rts_fail);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rts success", vdev->num_rts_success);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx err", vdev->num_rx_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num rx discard", vdev->num_rx_discard);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"num tx not acked", vdev->num_tx_not_acked);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames", i,
+				vdev->num_tx_frames[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames retries", i,
+				vdev->num_tx_frames_retries[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"num tx frames failures", i,
+				vdev->num_tx_frames_failures[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] 0x%08x\n",
+				"tx rate history", i,
+				vdev->tx_rate_history[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+		len += scnprintf(buf + len, buf_len - len,
+				"%25s [%02d] %u\n",
+				"beacon rssi history", i,
+				vdev->beacon_rssi_history[i]);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+			      char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+			"Peer MAC address", peer->peer_macaddr);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer RSSI", peer->peer_rssi);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer TX rate", peer->peer_tx_rate);
+	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+			"Peer RX rate", peer->peer_rx_rate);
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	*length = len;
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
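+	/* ar->data_lock protects the fw_stats lists, which are refilled
+	 * from the WMI stats event path; hold it across the list walks.
+	 */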
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
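+	/* defensively keep the output NUL terminated whether or not it
+	 * was truncated to ATH10K_FW_STATS_BUF_SIZE
+	 */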
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+				     struct ath10k_fw_stats *fw_stats,
+				     char *buf)
+{
+	unsigned int len = 0;
+	unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+					   u32 detect_level, u32 detect_margin)
+{
+	struct wmi_pdev_set_adaptive_cca_params *cmd;
+	struct sk_buff *skb;
+
+	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
+	cmd->enable = __cpu_to_le32(enable);
+	cmd->cca_detect_level = __cpu_to_le32(detect_level);
+	cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
+		   enable, detect_level, detect_margin);
+	return skb;
+}
+
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+	const struct ath10k_fw_stats_pdev *pdev;
+	const struct ath10k_fw_stats_vdev *vdev;
+	const struct ath10k_fw_stats_peer *peer;
+	size_t num_peers;
+	size_t num_vdevs;
+
+	spin_lock_bh(&ar->data_lock);
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath10k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath10k_warn(ar, "failed to get pdev stats\n");
+		goto unlock;
+	}
+
+	num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+	num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+	ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+	ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"HW paused", pdev->hw_paused);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs posted", pdev->seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs failed queueing", pdev->seq_failed_queueing);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs completed", pdev->seq_completed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Seqs restarted", pdev->seq_restarted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MU Seqs posted", pdev->mu_seq_posted);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs SW flushed", pdev->mpdus_sw_flush);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs HW filtered", pdev->mpdus_hw_filter);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs truncated", pdev->mpdus_truncated);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs receive no ACK", pdev->mpdus_ack_failed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"MPDUs expired", pdev->mpdus_expired);
+
+	ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Num Rx Overflow errors", pdev->rx_ovfl_errs);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			"ath10k VDEV stats", num_vdevs);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				"=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			"ath10k PEER stats", num_peers);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				"=================");
+
+	list_for_each_entry(peer, &fw_stats->peers, list) {
+		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+	}
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+}
+
 static const struct wmi_ops wmi_ops = {
 	.rx = ath10k_wmi_op_rx,
 	.map_svc = wmi_main_svc_map,
@@ -6353,10 +7290,12 @@
 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
 	/* .gen_adaptive_qcs not implemented */
+	/* .gen_pdev_enable_adaptive_cca not implemented */
 };
 
 static const struct wmi_ops wmi_10_1_ops = {
@@ -6418,10 +7357,12 @@
 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
 	/* .gen_adaptive_qcs not implemented */
+	/* .gen_pdev_enable_adaptive_cca not implemented */
 };
 
 static const struct wmi_ops wmi_10_2_ops = {
@@ -6484,6 +7425,8 @@
 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	/* .gen_pdev_enable_adaptive_cca not implemented */
 };
 
 static const struct wmi_ops wmi_10_2_4_ops = {
@@ -6545,6 +7488,10 @@
 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+	.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+	.gen_pdev_enable_adaptive_cca =
+		ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
 	/* .gen_bcn_tmpl not implemented */
 	/* .gen_prb_tmpl not implemented */
 	/* .gen_p2p_go_bcn_ie not implemented */
@@ -6555,6 +7502,7 @@
 	.rx = ath10k_wmi_10_4_op_rx,
 	.map_svc = wmi_10_4_svc_map,
 
+	.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
 	.pull_scan = ath10k_wmi_op_pull_scan_ev,
 	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
 	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
@@ -6604,9 +7552,11 @@
 	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
 	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
 	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+	.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
 
 	/* shared with 10.2 */
 	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+	.gen_request_stats = ath10k_wmi_op_gen_request_stats,
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
@@ -6660,15 +7610,10 @@
 	return 0;
 }
 
-void ath10k_wmi_detach(struct ath10k *ar)
+void ath10k_wmi_free_host_mem(struct ath10k *ar)
 {
 	int i;
 
-	cancel_work_sync(&ar->svc_rdy_work);
-
-	if (ar->svc_rdy_skb)
-		dev_kfree_skb(ar->svc_rdy_skb);
-
 	/* free the host memory chunks requested by firmware */
 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
 		dma_free_coherent(ar->dev,
@@ -6679,3 +7624,11 @@
 
 	ar->wmi.num_mem_chunks = 0;
 }
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+	cancel_work_sync(&ar->svc_rdy_work);
+
+	if (ar->svc_rdy_skb)
+		dev_kfree_skb(ar->svc_rdy_skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 52d3503..72a4ef7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -73,6 +73,25 @@
 #define HTC_PROTOCOL_VERSION    0x0002
 #define WMI_PROTOCOL_VERSION    0x0002
 
+/*
+ * There is no signed version of __le32, so as a temporary solution we
+ * define our own. The idea is from fs/ntfs/types.h.
+ *
+ * Use the a_ prefix so that it doesn't conflict if proper support is
+ * later added to linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+	return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+	return le32_to_cpu((__force __le32)val);
+}
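+
+/* Usage sketch (illustrative, not from the original patch): fields such
+ * as twice_antenna_gain below stay negative across the conversion:
+ *
+ *	s32 gain = a_sle32_to_cpu(ev->twice_antenna_gain) / 2;
+ */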
+
 enum wmi_service {
 	WMI_SERVICE_BEACON_OFFLOAD = 0,
 	WMI_SERVICE_SCAN_OFFLOAD,
@@ -753,6 +772,7 @@
 	u32 mu_cal_start_cmdid;
 	u32 set_cca_params_cmdid;
 	u32 pdev_bss_chan_info_request_cmdid;
+	u32 pdev_enable_adaptive_cca_cmdid;
 };
 
 /*
@@ -1362,6 +1382,9 @@
 	WMI_10_2_VDEV_ATF_REQUEST_CMDID,
 	WMI_10_2_PEER_ATF_REQUEST_CMDID,
 	WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_10_2_MU_CAL_START_CMDID,
+	WMI_10_2_SET_LTEU_CONFIG_CMDID,
+	WMI_10_2_SET_CCA_PARAMS,
 	WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
 };
 
@@ -3642,8 +3665,18 @@
 	__le32 param;
 } __packed;
 
+#define WMI_TPC_CONFIG_PARAM		1
 #define WMI_TPC_RATE_MAX		160
 #define WMI_TPC_TX_N_CHAIN		4
+#define WMI_TPC_PREAM_TABLE_MAX		10
+#define WMI_TPC_FLAG			3
+#define WMI_TPC_BUF_SIZE		10
+
+enum wmi_tpc_table_type {
+	WMI_TPC_TABLE_TYPE_CDD = 0,
+	WMI_TPC_TABLE_TYPE_STBC = 1,
+	WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
 
 enum wmi_tpc_config_event_flag {
 	WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD	= 0x1,
@@ -3657,7 +3690,7 @@
 	__le32 phy_mode;
 	__le32 twice_antenna_reduction;
 	__le32 twice_max_rd_power;
-	s32 twice_antenna_gain;
+	a_sle32 twice_antenna_gain;
 	__le32 power_limit;
 	__le32 rate_max;
 	__le32 num_tx_chain;
@@ -3833,6 +3866,111 @@
 	__le32 txop_ovf;
 } __packed;
 
+struct wmi_10_4_pdev_stats_tx {
+	/* Num HTT cookies queued to dispatch list */
+	__le32 comp_queued;
+
+	/* Num HTT cookies dispatched */
+	__le32 comp_delivered;
+
+	/* Num MSDU queued to WAL */
+	__le32 msdu_enqued;
+
+	/* Num MPDU queue to WAL */
+	__le32 mpdu_enqued;
+
+	/* Num MSDUs dropped by WMM limit */
+	__le32 wmm_drop;
+
+	/* Num Local frames queued */
+	__le32 local_enqued;
+
+	/* Num Local frames done */
+	__le32 local_freed;
+
+	/* Num queued to HW */
+	__le32 hw_queued;
+
+	/* Num PPDU reaped from HW */
+	__le32 hw_reaped;
+
+	/* Num underruns */
+	__le32 underrun;
+
+	/* HW paused */
+	__le32 hw_paused;
+
+	/* Num PPDUs cleaned up in TX abort */
+	__le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+	__le32 mpdus_requed;
+
+	/* excessive retries */
+	__le32 tx_ko;
+
+	/* data hw rate code */
+	__le32 data_rc;
+
+	/* Scheduler self triggers */
+	__le32 self_triggers;
+
+	/* frames dropped due to excessive sw retries */
+	__le32 sw_retry_failure;
+
+	/* illegal rate phy errors  */
+	__le32 illgl_rate_phy_err;
+
+	/* wal pdev continuous xretry */
+	__le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeouts */
+	__le32 pdev_tx_timeout;
+
+	/* wal pdev resets  */
+	__le32 pdev_resets;
+
+	/* frames dropped due to non-availability of stateless TIDs */
+	__le32 stateless_tid_alloc_failure;
+
+	__le32 phy_underrun;
+
+	/* MPDU is more than txop limit */
+	__le32 txop_ovf;
+
+	/* Number of Sequences posted */
+	__le32 seq_posted;
+
+	/* Number of Sequences failed queueing */
+	__le32 seq_failed_queueing;
+
+	/* Number of Sequences completed */
+	__le32 seq_completed;
+
+	/* Number of Sequences restarted */
+	__le32 seq_restarted;
+
+	/* Number of MU Sequences posted */
+	__le32 mu_seq_posted;
+
+	/* Num MPDUs flushed by SW, HW PAUSED, SW TXABORT (Reset, channel change) */
+	__le32 mpdus_sw_flush;
+
+	/* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+	__le32 mpdus_hw_filter;
+
+	/* Num MPDUs truncated by PDG
+	 * (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
+	 */
+	__le32 mpdus_truncated;
+
+	/* Num MPDUs that were tried but didn't receive ACK or BA */
+	__le32 mpdus_ack_failed;
+
+	/* Num MPDUs that were dropped due to expiry */
+	__le32 mpdus_expired;
+} __packed;
+
 struct wmi_pdev_stats_rx {
 	/* Cnts any change in ring routing mid-ppdu */
 	__le32 mid_ppdu_route_change;
@@ -4006,6 +4144,16 @@
 	struct wmi_pdev_stats_extra extra;
 } __packed;
 
+struct wmi_10_4_pdev_stats {
+	struct wmi_pdev_stats_base base;
+	struct wmi_10_4_pdev_stats_tx tx;
+	struct wmi_pdev_stats_rx rx;
+	__le32 rx_ovfl_errs;
+	struct wmi_pdev_stats_mem mem;
+	__le32 sram_free_size;
+	struct wmi_pdev_stats_extra extra;
+} __packed;
+
 /*
  * VDEV statistics
  * TODO: add all VDEV stats here
@@ -4047,6 +4195,23 @@
 	__le32 unknown_value; /* FIXME: what is this word? */
 } __packed;
 
+struct wmi_10_4_peer_stats {
+	struct wmi_mac_addr peer_macaddr;
+	__le32 peer_rssi;
+	__le32 peer_rssi_seq_num;
+	__le32 peer_tx_rate;
+	__le32 peer_rx_rate;
+	__le32 current_per;
+	__le32 retries;
+	__le32 tx_rate_count;
+	__le32 max_4ms_frame_len;
+	__le32 total_sub_frames;
+	__le32 tx_bytes;
+	__le32 num_pkt_loss_overflow[4];
+	__le32 num_pkt_loss_excess_retry[4];
+	__le32 peer_rssi_changed;
+} __packed;
+
 struct wmi_10_2_pdev_ext_stats {
 	__le32 rx_rssi_comb;
 	__le32 rx_rssi[4];
@@ -4253,6 +4418,11 @@
 	WMI_RATE_PREAMBLE_VHT,
 };
 
+#define ATH10K_HW_NSS(rate)		(1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate)	(((rate) >> 6) & 0x3)
+#define ATH10K_HW_RATECODE(rate, nss, preamble)	\
+	(((preamble) << 6) | ((nss) << 4) | (rate))
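+
+/* Worked example (illustrative): ATH10K_HW_RATECODE(3, 1, 2) packs to
+ * (2 << 6) | (1 << 4) | 3 = 0x93; ATH10K_HW_NSS(0x93) decodes back to 2
+ * (the field stores nss - 1) and ATH10K_HW_PREAMBLE(0x93) to 2.
+ */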
+
 /* Value to disable fixed rate setting */
 #define WMI_FIXED_RATE_NONE    (0xff)
 
@@ -6060,13 +6230,24 @@
 	WMI_TXBF_CONF_AFTER_ASSOC,
 };
 
+#define	WMI_CCA_DETECT_LEVEL_AUTO	0
+#define	WMI_CCA_DETECT_MARGIN_AUTO	0
+
+struct wmi_pdev_set_adaptive_cca_params {
+	__le32 enable;
+	__le32 cca_detect_level;
+	__le32 cca_detect_margin;
+} __packed;
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
 struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
 
 int ath10k_wmi_attach(struct ath10k *ar);
 void ath10k_wmi_detach(struct ath10k *ar);
+void ath10k_wmi_free_host_mem(struct ath10k *ar);
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
 
@@ -6144,4 +6325,16 @@
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
 				 int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+				     struct ath10k_fw_stats *fw_stats,
+				     char *buf);
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+				      struct ath10k_fw_stats *fw_stats,
+				      char *buf);
+
 #endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index a511ef3..81ac8c5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -2217,7 +2217,7 @@
 
 	/* enter / leave wow suspend on first vif always */
 	first_vif = ath6kl_vif_first(ar);
-	if (WARN_ON(unlikely(!first_vif)) ||
+	if (WARN_ON(!first_vif) ||
 	    !ath6kl_cfg80211_ready(first_vif))
 		return -EIO;
 
@@ -2297,7 +2297,7 @@
 	int ret;
 
 	vif = ath6kl_vif_first(ar);
-	if (WARN_ON(unlikely(!vif)) ||
+	if (WARN_ON(!vif) ||
 	    !ath6kl_cfg80211_ready(vif))
 		return -EIO;
 
@@ -3231,6 +3231,15 @@
 					wait, buf, len, no_cck);
 }
 
+static int ath6kl_get_antenna(struct wiphy *wiphy,
+			      u32 *tx_ant, u32 *rx_ant)
+{
+	struct ath6kl *ar = wiphy_priv(wiphy);
+	*tx_ant = ar->hw.tx_ant;
+	*rx_ant = ar->hw.rx_ant;
+	return 0;
+}
+
 static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
 				       struct wireless_dev *wdev,
 				       u16 frame_type, bool reg)
@@ -3312,7 +3321,7 @@
 	}
 
 	/* fw uses seconds, also make sure that it's >0 */
-	interval = max_t(u16, 1, request->interval / 1000);
+	interval = max_t(u16, 1, request->scan_plans[0].interval);
 
 	ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
 				  interval, interval,
@@ -3447,6 +3456,7 @@
 	.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
 	.mgmt_tx = ath6kl_mgmt_tx,
 	.mgmt_frame_register = ath6kl_mgmt_frame_register,
+	.get_antenna = ath6kl_get_antenna,
 	.sched_scan_start = ath6kl_cfg80211_sscan_start,
 	.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
 	.set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
@@ -3634,6 +3644,127 @@
 	ar->num_vif--;
 }
 
+static const char ath6kl_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
+	/* Common stats names used by many drivers. */
+	"tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic",
+
+	/* TX stats. */
+	"d_tx_ucast_pkts", "d_tx_bcast_pkts",
+	"d_tx_ucast_bytes", "d_tx_bcast_bytes",
+	"d_tx_rts_ok", "d_tx_error", "d_tx_fail",
+	"d_tx_retry", "d_tx_multi_retry", "d_tx_rts_fail",
+	"d_tx_tkip_counter_measures",
+
+	/* RX Stats. */
+	"d_rx_ucast_pkts", "d_rx_ucast_rate", "d_rx_bcast_pkts",
+	"d_rx_ucast_bytes", "d_rx_bcast_bytes", "d_rx_frag_pkt",
+	"d_rx_error", "d_rx_crc_err", "d_rx_keycache_miss",
+	"d_rx_decrypt_crc_err", "d_rx_duplicate_frames",
+	"d_rx_mic_err", "d_rx_tkip_format_err", "d_rx_ccmp_format_err",
+	"d_rx_ccmp_replay_err",
+
+	/* Misc stats. */
+	"d_beacon_miss", "d_num_connects", "d_num_disconnects",
+	"d_beacon_avg_rssi", "d_arp_received", "d_arp_matched",
+	"d_arp_replied"
+};
+
+#define ATH6KL_STATS_LEN	ARRAY_SIZE(ath6kl_gstrings_sta_stats)
+
+static int ath6kl_get_sset_count(struct net_device *dev, int sset)
+{
+	int rv = 0;
+
+	if (sset == ETH_SS_STATS)
+		rv += ATH6KL_STATS_LEN;
+
+	if (rv == 0)
+		return -EOPNOTSUPP;
+	return rv;
+}
+
+static void ath6kl_get_stats(struct net_device *dev,
+			    struct ethtool_stats *stats,
+			    u64 *data)
+{
+	struct ath6kl_vif *vif = netdev_priv(dev);
+	struct ath6kl *ar = vif->ar;
+	int i = 0;
+	struct target_stats *tgt_stats;
+
+	memset(data, 0, sizeof(u64) * ATH6KL_STATS_LEN);
+
+	ath6kl_read_tgt_stats(ar, vif);
+
+	tgt_stats = &vif->target_stats;
+
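+	/* the fill order below must match ath6kl_gstrings_sta_stats;
+	 * the WARN_ON_ONCE at the end catches any mismatch
+	 */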
+	data[i++] = tgt_stats->tx_ucast_pkt + tgt_stats->tx_bcast_pkt;
+	data[i++] = tgt_stats->tx_ucast_byte + tgt_stats->tx_bcast_byte;
+	data[i++] = tgt_stats->rx_ucast_pkt + tgt_stats->rx_bcast_pkt;
+	data[i++] = tgt_stats->rx_ucast_byte + tgt_stats->rx_bcast_byte;
+
+	data[i++] = tgt_stats->tx_ucast_pkt;
+	data[i++] = tgt_stats->tx_bcast_pkt;
+	data[i++] = tgt_stats->tx_ucast_byte;
+	data[i++] = tgt_stats->tx_bcast_byte;
+	data[i++] = tgt_stats->tx_rts_success_cnt;
+	data[i++] = tgt_stats->tx_err;
+	data[i++] = tgt_stats->tx_fail_cnt;
+	data[i++] = tgt_stats->tx_retry_cnt;
+	data[i++] = tgt_stats->tx_mult_retry_cnt;
+	data[i++] = tgt_stats->tx_rts_fail_cnt;
+	data[i++] = tgt_stats->tkip_cnter_measures_invoked;
+
+	data[i++] = tgt_stats->rx_ucast_pkt;
+	data[i++] = tgt_stats->rx_ucast_rate;
+	data[i++] = tgt_stats->rx_bcast_pkt;
+	data[i++] = tgt_stats->rx_ucast_byte;
+	data[i++] = tgt_stats->rx_bcast_byte;
+	data[i++] = tgt_stats->rx_frgment_pkt;
+	data[i++] = tgt_stats->rx_err;
+	data[i++] = tgt_stats->rx_crc_err;
+	data[i++] = tgt_stats->rx_key_cache_miss;
+	data[i++] = tgt_stats->rx_decrypt_err;
+	data[i++] = tgt_stats->rx_dupl_frame;
+	data[i++] = tgt_stats->tkip_local_mic_fail;
+	data[i++] = tgt_stats->tkip_fmt_err;
+	data[i++] = tgt_stats->ccmp_fmt_err;
+	data[i++] = tgt_stats->ccmp_replays;
+
+	data[i++] = tgt_stats->cs_bmiss_cnt;
+	data[i++] = tgt_stats->cs_connect_cnt;
+	data[i++] = tgt_stats->cs_discon_cnt;
+	data[i++] = tgt_stats->cs_ave_beacon_rssi;
+	data[i++] = tgt_stats->arp_received;
+	data[i++] = tgt_stats->arp_matched;
+	data[i++] = tgt_stats->arp_replied;
+
+	if (i != ATH6KL_STATS_LEN) {
+		WARN_ON_ONCE(1);
+		ath6kl_err("ethtool stats error, i: %d STATS_LEN: %d\n",
+			   i, (int)ATH6KL_STATS_LEN);
+	}
+}
+
+/* These stats are per NIC, not really per vdev, so we just ignore dev. */
+static void ath6kl_get_strings(struct net_device *dev, u32 sset, u8 *data)
+{
+	int sz_sta_stats = 0;
+
+	if (sset == ETH_SS_STATS) {
+		sz_sta_stats = sizeof(ath6kl_gstrings_sta_stats);
+		memcpy(data, ath6kl_gstrings_sta_stats, sz_sta_stats);
+	}
+}
+
+static const struct ethtool_ops ath6kl_ethtool_ops = {
+	.get_drvinfo = cfg80211_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_strings = ath6kl_get_strings,
+	.get_ethtool_stats = ath6kl_get_stats,
+	.get_sset_count = ath6kl_get_sset_count,
+};
+
 struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
 					  unsigned char name_assign_type,
 					  enum nl80211_iftype type,
@@ -3679,6 +3810,8 @@
 	if (ath6kl_cfg80211_vif_init(vif))
 		goto err;
 
+	netdev_set_default_ethtool_ops(ndev, &ath6kl_ethtool_ops);
+
 	if (register_netdevice(ndev))
 		goto err;
 
@@ -3786,6 +3919,9 @@
 		ath6kl_band_2ghz.ht_cap.ht_supported = false;
 		ath6kl_band_5ghz.ht_cap.cap = 0;
 		ath6kl_band_5ghz.ht_cap.ht_supported = false;
+
+		if (ht)
+			ath6kl_err("Firmware lacks RSN-CAP-OVERRIDE, so HT (802.11n) is disabled.");
 	}
 
 	if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
@@ -3794,11 +3930,18 @@
 		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
 		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+		ar->hw.tx_ant = 2;
+		ar->hw.rx_ant = 2;
 	} else {
 		ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
 		ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+		ar->hw.tx_ant = 1;
+		ar->hw.rx_ant = 1;
 	}
 
+	wiphy->available_antennas_tx = ar->hw.tx_ant;
+	wiphy->available_antennas_rx = ar->hw.rx_ant;
+
 	if (band_2gig)
 		wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
 	if (band_5gig)
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 2b78c86..5f3acfe 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -782,6 +782,8 @@
 		u32 refclk_hz;
 		u32 uarttx_pin;
 		u32 testscript_addr;
+		u8 tx_ant;
+		u8 rx_ant;
 		enum wmi_phy_cap cap;
 
 		u32 flags;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 81ba48d..e2b7809 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -98,6 +98,33 @@
 }
 EXPORT_SYMBOL(ath6kl_warn);
 
+int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif)
+{
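+	/* synchronously refresh vif->target_stats: send the WMI get-stats
+	 * command and wait up to WMI_TIMEOUT for the stats event to clear
+	 * STATS_UPDATE_PEND
+	 */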
+	long left;
+
+	if (down_interruptible(&ar->sem))
+		return -EBUSY;
+
+	set_bit(STATS_UPDATE_PEND, &vif->flags);
+
+	if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
+		up(&ar->sem);
+		return -EIO;
+	}
+
+	left = wait_event_interruptible_timeout(ar->event_wq,
+						!test_bit(STATS_UPDATE_PEND,
+						&vif->flags), WMI_TIMEOUT);
+
+	up(&ar->sem);
+
+	if (left <= 0)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+EXPORT_SYMBOL(ath6kl_read_tgt_stats);
+
 #ifdef CONFIG_ATH6KL_DEBUG
 
 void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
@@ -544,42 +571,24 @@
 	char *buf;
 	unsigned int len = 0, buf_len = 1500;
 	int i;
-	long left;
 	ssize_t ret_cnt;
+	int rv;
 
 	vif = ath6kl_vif_first(ar);
 	if (!vif)
 		return -EIO;
 
-	tgt_stats = &vif->target_stats;
-
 	buf = kzalloc(buf_len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
-	if (down_interruptible(&ar->sem)) {
+	rv = ath6kl_read_tgt_stats(ar, vif);
+	if (rv < 0) {
 		kfree(buf);
-		return -EBUSY;
+		return rv;
 	}
 
-	set_bit(STATS_UPDATE_PEND, &vif->flags);
-
-	if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
-		up(&ar->sem);
-		kfree(buf);
-		return -EIO;
-	}
-
-	left = wait_event_interruptible_timeout(ar->event_wq,
-						!test_bit(STATS_UPDATE_PEND,
-						&vif->flags), WMI_TIMEOUT);
-
-	up(&ar->sem);
-
-	if (left <= 0) {
-		kfree(buf);
-		return -ETIMEDOUT;
-	}
+	tgt_stats = &vif->target_stats;
 
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	len += scnprintf(buf + len, buf_len - len, "%25s\n",
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 19106ed..0614393 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -59,6 +59,8 @@
 	ATH6KL_WAR_INVALID_RATE,
 };
 
+int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif);
+
 #ifdef CONFIG_ATH6KL_DEBUG
 
 void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index e481f14..fffb65b 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1085,9 +1085,7 @@
 	send_pkt->completion = NULL;
 	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
 	status = ath6kl_htc_tx_issue(target, send_pkt);
-
-	if (send_pkt != NULL)
-		htc_reclaim_txctrl_buf(target, send_pkt);
+	htc_reclaim_txctrl_buf(target, send_pkt);
 
 	return status;
 }
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 6e473fa..e3f3a6a 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -994,7 +994,7 @@
 		switch (ie_id) {
 		case ATH6KL_FW_IE_FW_VERSION:
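+			/* the IE payload is not guaranteed to be NUL
+			 * terminated, so bound the copy by ie_len as well
+			 */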
 			strlcpy(ar->wiphy->fw_version, data,
-				sizeof(ar->wiphy->fw_version));
+				min(sizeof(ar->wiphy->fw_version), ie_len + 1));
 
 			ath6kl_dbg(ATH6KL_DBG_BOOT,
 				   "found fw version %s\n",
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index 6314ae2..9d17a53 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -610,8 +610,8 @@
 #define AR_PHY_CCA_MIN_GOOD_VAL_9271_2GHZ      -127
 #define AR_PHY_CCA_MAX_GOOD_VAL_9271_2GHZ      -116
 
-#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -120
+#define AR_PHY_CCA_NOM_VAL_9287_2GHZ           -112
 #define AR_PHY_CCA_MIN_GOOD_VAL_9287_2GHZ    -127
-#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -110
+#define AR_PHY_CCA_MAX_GOOD_VAL_9287_2GHZ    -97
 
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 174442b..0c39199 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1249,7 +1249,8 @@
 		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
 			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
 
-	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
+	    AR_SREV_9561(ah)) {
 		if (is_2g)
 			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
 				      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
@@ -1640,7 +1641,8 @@
 
 skip_tx_iqcal:
 	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
-		if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+		if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
+		    AR_SREV_9561(ah)) {
 			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
 				if (!(ah->rxchainmask & (1 << i)))
 					continue;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 79fd3b2..8b238c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -857,7 +857,7 @@
 			       qca956x_1p0_common_rx_gain_table);
 		INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
 			       qca956x_1p0_common_rx_gain_bounds);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       qca956x_1p0_xlna_only);
 	} else if (AR_SREV_9580(ah))
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -942,7 +942,7 @@
 			       ar9462_2p1_baseband_core_mix_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
 			       ar9462_2p1_baseband_postamble_mix_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
@@ -951,7 +951,7 @@
 			       ar9462_2p0_baseband_core_mix_rxgain);
 		INIT_INI_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
 			       ar9462_2p0_baseband_postamble_mix_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p0_baseband_postamble_5g_xlna);
 	}
 }
@@ -961,12 +961,12 @@
 	if (AR_SREV_9462_21(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9462_2p1_common_5g_xlna_only_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p1_baseband_postamble_5g_xlna);
 	} else if (AR_SREV_9462_20(ah)) {
 		INIT_INI_ARRAY(&ah->iniModesRxGain,
 			       ar9462_2p0_common_5g_xlna_only_rxgain);
-		INIT_INI_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+		INIT_INI_ARRAY(&ah->ini_modes_rxgain_xlna,
 			       ar9462_2p0_baseband_postamble_5g_xlna);
 	}
 }
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1ad66b7..201425e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -926,19 +926,18 @@
 		 */
 		if ((ar9003_hw_get_rx_gain_idx(ah) == 2) ||
 		    (ar9003_hw_get_rx_gain_idx(ah) == 3)) {
-			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
+			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
 					modesIndex, regWrites);
 		}
-
-		if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
-			REG_WRITE_ARRAY(&ah->ini_modes_rxgain_5g_xlna,
-					modesIndex, regWrites);
 	}
 
 	if (AR_SREV_9550(ah) || AR_SREV_9561(ah))
 		REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
 				regWrites);
 
+	if (AR_SREV_9561(ah) && (ar9003_hw_get_rx_gain_idx(ah) == 0))
+		REG_WRITE_ARRAY(&ah->ini_modes_rxgain_xlna,
+				modesIndex, regWrites);
 	/*
 	 * TXGAIN initvals.
 	 */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c85c479..b42f4a9 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -635,6 +635,7 @@
 	int nstations; /* number of station vifs */
 	int nwds;      /* number of WDS vifs */
 	int nadhocs;   /* number of adhoc vifs */
+	int nocbs;     /* number of OCB vifs */
 	struct ieee80211_vif *primary_sta;
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/common-debug.c b/drivers/net/wireless/ath/ath9k/common-debug.c
index 3b289f9..84afcf7 100644
--- a/drivers/net/wireless/ath/ath9k/common-debug.c
+++ b/drivers/net/wireless/ath/ath9k/common-debug.c
@@ -207,6 +207,7 @@
 	PHY_ERR("RADAR ERR", ATH9K_PHYERR_RADAR);
 	PHY_ERR("SERVICE ERR", ATH9K_PHYERR_SERVICE);
 	PHY_ERR("TOR ERR", ATH9K_PHYERR_TOR);
+
 	PHY_ERR("OFDM-TIMING ERR", ATH9K_PHYERR_OFDM_TIMING);
 	PHY_ERR("OFDM-SIGNAL-PARITY ERR", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
 	PHY_ERR("OFDM-RATE ERR", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
@@ -214,17 +215,24 @@
 	PHY_ERR("OFDM-POWER-DROP ERR", ATH9K_PHYERR_OFDM_POWER_DROP);
 	PHY_ERR("OFDM-SERVICE ERR", ATH9K_PHYERR_OFDM_SERVICE);
 	PHY_ERR("OFDM-RESTART ERR", ATH9K_PHYERR_OFDM_RESTART);
-	PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+
+	PHY_ERR("CCK-BLOCKER ERR", ATH9K_PHYERR_CCK_BLOCKER);
 	PHY_ERR("CCK-TIMING ERR", ATH9K_PHYERR_CCK_TIMING);
 	PHY_ERR("CCK-HEADER-CRC ERR", ATH9K_PHYERR_CCK_HEADER_CRC);
 	PHY_ERR("CCK-RATE ERR", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
-	PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
-	PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
 	PHY_ERR("CCK-LENGTH ERR", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
 	PHY_ERR("CCK-POWER-DROP ERR", ATH9K_PHYERR_CCK_POWER_DROP);
+	PHY_ERR("CCK-SERVICE ERR", ATH9K_PHYERR_CCK_SERVICE);
+	PHY_ERR("CCK-RESTART ERR", ATH9K_PHYERR_CCK_RESTART);
+
 	PHY_ERR("HT-CRC ERR", ATH9K_PHYERR_HT_CRC_ERROR);
 	PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
 	PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+	PHY_ERR("HT-ZLF ERR", ATH9K_PHYERR_HT_ZLF);
+
+	PHY_ERR("FALSE-RADAR-EXT ERR", ATH9K_PHYERR_FALSE_RADAR_EXT);
+	PHY_ERR("GREEN-FIELD ERR", ATH9K_PHYERR_GREEN_FIELD);
+	PHY_ERR("SPECTRAL ERR", ATH9K_PHYERR_SPECTRAL);
 
 	if (len > size)
 		len = size;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index da32c8f..6de64cf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -741,8 +741,8 @@
 			   i++, (int)(ctx->assigned), iter_data.naps,
 			   iter_data.nstations,
 			   iter_data.nmeshes, iter_data.nwds);
-		seq_printf(file, " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
-			   iter_data.nadhocs, sc->cur_chan->nvifs,
+		seq_printf(file, " ADHOC: %i OCB: %i TOTAL: %hi BEACON-VIF: %hi\n",
+			   iter_data.nadhocs, iter_data.nocbs, sc->cur_chan->nvifs,
 			   sc->nbcnvifs);
 	}
 
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 10c02f5..165dd20 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -17,12 +17,8 @@
 #include <asm/unaligned.h>
 #include "htc.h"
 
-/* identify firmware images */
-#define FIRMWARE_AR7010_1_1     "htc_7010.fw"
-#define FIRMWARE_AR9271         "htc_9271.fw"
-
-MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
-MODULE_FIRMWARE(FIRMWARE_AR9271);
+MODULE_FIRMWARE(HTC_7010_MODULE_FW);
+MODULE_FIRMWARE(HTC_9271_MODULE_FW);
 
 static struct usb_device_id ath9k_hif_usb_ids[] = {
 	{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
@@ -1080,12 +1076,88 @@
 		device_unlock(parent);
 }
 
+static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context);
+
+/* taken from iwlwifi */
+static int ath9k_hif_request_firmware(struct hif_device_usb *hif_dev,
+				      bool first)
+{
+	char index[8], *chip;
+	int ret;
+
+	if (first) {
+		if (htc_use_dev_fw) {
+			hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX + 1;
+			sprintf(index, "%s", "dev");
+		} else {
+			hif_dev->fw_minor_index = FIRMWARE_MINOR_IDX_MAX;
+			sprintf(index, "%d", hif_dev->fw_minor_index);
+		}
+	} else {
+		hif_dev->fw_minor_index--;
+		sprintf(index, "%d", hif_dev->fw_minor_index);
+	}
+
+	/* test for FW 1.3 */
+	if (MAJOR_VERSION_REQ == 1 && hif_dev->fw_minor_index == 3) {
+		const char *filename;
+
+		if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
+			filename = FIRMWARE_AR7010_1_1;
+		else
+			filename = FIRMWARE_AR9271;
+
+		/* expected fw locations:
+		 * - htc_9271.fw   (stable version 1.3, deprecated)
+		 */
+		snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
+			 "%s", filename);
+
+	} else if (hif_dev->fw_minor_index < FIRMWARE_MINOR_IDX_MIN) {
+		dev_err(&hif_dev->udev->dev, "no suitable firmware found!\n");
+
+		return -ENOENT;
+	} else {
+		if (IS_AR7010_DEVICE(hif_dev->usb_device_id->driver_info))
+			chip = "7010";
+		else
+			chip = "9271";
+
+		/* expected fw locations:
+		 * - ath9k_htc/htc_9271-1.dev.0.fw (development version)
+		 * - ath9k_htc/htc_9271-1.4.0.fw   (stable version)
+		 */
+		snprintf(hif_dev->fw_name, sizeof(hif_dev->fw_name),
+			 "%s/htc_%s-%d.%s.0.fw", HTC_FW_PATH,
+			 chip, MAJOR_VERSION_REQ, index);
+	}
+
+	ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
+				      &hif_dev->udev->dev, GFP_KERNEL,
+				      hif_dev, ath9k_hif_usb_firmware_cb);
+	if (ret) {
+		dev_err(&hif_dev->udev->dev,
+			"ath9k_htc: Async request for firmware %s failed\n",
+			hif_dev->fw_name);
+		return ret;
+	}
+
+	dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
+		 hif_dev->fw_name);
+
+	return ret;
+}
+
 static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
 {
 	struct hif_device_usb *hif_dev = context;
 	int ret;
 
 	if (!fw) {
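+		/* fall back to the next older firmware before giving up */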
+		ret = ath9k_hif_request_firmware(hif_dev, false);
+		if (!ret)
+			return;
+
 		dev_err(&hif_dev->udev->dev,
 			"ath9k_htc: Failed to get firmware %s\n",
 			hif_dev->fw_name);
@@ -1215,27 +1287,11 @@
 
 	init_completion(&hif_dev->fw_done);
 
-	/* Find out which firmware to load */
-
-	if (IS_AR7010_DEVICE(id->driver_info))
-		hif_dev->fw_name = FIRMWARE_AR7010_1_1;
-	else
-		hif_dev->fw_name = FIRMWARE_AR9271;
-
-	ret = request_firmware_nowait(THIS_MODULE, true, hif_dev->fw_name,
-				      &hif_dev->udev->dev, GFP_KERNEL,
-				      hif_dev, ath9k_hif_usb_firmware_cb);
-	if (ret) {
-		dev_err(&hif_dev->udev->dev,
-			"ath9k_htc: Async request for firmware %s failed\n",
-			hif_dev->fw_name);
+	ret = ath9k_hif_request_firmware(hif_dev, true);
+	if (ret)
 		goto err_fw_req;
-	}
 
-	dev_info(&hif_dev->udev->dev, "ath9k_htc: Firmware %s requested\n",
-		 hif_dev->fw_name);
-
-	return 0;
+	return ret;
 
 err_fw_req:
 	usb_set_intfdata(interface, NULL);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 51496e7..7c2ef7e 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -17,8 +17,26 @@
 #ifndef HTC_USB_H
 #define HTC_USB_H
 
+/* old firmware images */
+#define FIRMWARE_AR7010_1_1     "htc_7010.fw"
+#define FIRMWARE_AR9271         "htc_9271.fw"
+
+/* supported Major FW version */
 #define MAJOR_VERSION_REQ 1
 #define MINOR_VERSION_REQ 3
+/* minimum and maximum supported minor FW versions */
+#define FIRMWARE_MINOR_IDX_MAX  4
+#define FIRMWARE_MINOR_IDX_MIN  3
+#define HTC_FW_PATH	"ath9k_htc"
+
+#define HTC_9271_MODULE_FW  HTC_FW_PATH "/htc_9271-" \
+			__stringify(MAJOR_VERSION_REQ) \
+			"." __stringify(FIRMWARE_MINOR_IDX_MAX) ".0.fw"
+#define HTC_7010_MODULE_FW  HTC_FW_PATH "/htc_7010-" \
+			__stringify(MAJOR_VERSION_REQ) \
+			"." __stringify(FIRMWARE_MINOR_IDX_MAX) ".0.fw"
+
+extern int htc_use_dev_fw;
 
 #define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
 
@@ -101,7 +119,8 @@
 	struct usb_anchor reg_in_submitted;
 	struct usb_anchor mgmt_submitted;
 	struct sk_buff *remain_skb;
-	const char *fw_name;
+	char fw_name[32];
+	int fw_minor_index;
 	int rx_remain_len;
 	int rx_pkt_len;
 	int rx_transfer_len;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 1e84882..8647ab7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -38,6 +38,10 @@
 module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
 MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
 
+int htc_use_dev_fw = 0;
+module_param_named(use_dev_fw, htc_use_dev_fw, int, 0444);
+MODULE_PARM_DESC(use_dev_fw, "Use development FW version");
+
 #ifdef CONFIG_MAC80211_LEDS
 int ath9k_htc_led_blink = 1;
 module_param_named(blink, ath9k_htc_led_blink, int, 0444);
@@ -736,7 +740,8 @@
 		BIT(NL80211_IFTYPE_AP) |
 		BIT(NL80211_IFTYPE_P2P_GO) |
 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
-		BIT(NL80211_IFTYPE_MESH_POINT);
+		BIT(NL80211_IFTYPE_MESH_POINT) |
+		BIT(NL80211_IFTYPE_OCB);
 
 	hw->wiphy->iface_combinations = &if_comb;
 	hw->wiphy->n_iface_combinations = 1;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 172a9ff4a..a680a97 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1659,7 +1659,7 @@
 				  struct ieee80211_vif *vif,
 				  enum ieee80211_ampdu_mlme_action action,
 				  struct ieee80211_sta *sta,
-				  u16 tid, u16 *ssn, u8 buf_size)
+				  u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ath9k_htc_priv *priv = hw->priv;
 	struct ath9k_htc_sta *ista;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1dd0339..bdfff46 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1241,6 +1241,7 @@
 			break;
 		}
 		/* fall through */
+	case NL80211_IFTYPE_OCB:
 	case NL80211_IFTYPE_MESH_POINT:
 	case NL80211_IFTYPE_AP:
 		set |= AR_STA_ID1_STA_AP;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e8454db..4f0a3f6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -919,7 +919,7 @@
 	struct ar5416IniArray iniCckfirJapan2484;
 	struct ar5416IniArray iniModes_9271_ANI_reg;
 	struct ar5416IniArray ini_radio_post_sys2ant;
-	struct ar5416IniArray ini_modes_rxgain_5g_xlna;
+	struct ar5416IniArray ini_modes_rxgain_xlna;
 	struct ar5416IniArray ini_modes_rxgain_bb_core;
 	struct ar5416IniArray ini_modes_rxgain_bb_postamble;
 
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 57f95f2..2e2b92b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -855,7 +855,8 @@
 			BIT(NL80211_IFTYPE_STATION) |
 			BIT(NL80211_IFTYPE_ADHOC) |
 			BIT(NL80211_IFTYPE_MESH_POINT) |
-			BIT(NL80211_IFTYPE_WDS);
+			BIT(NL80211_IFTYPE_WDS) |
+			BIT(NL80211_IFTYPE_OCB);
 
 		if (ath9k_is_chanctx_enabled())
 			hw->wiphy->interface_modes |=
@@ -880,6 +881,7 @@
 	hw->max_rate_tries = 10;
 	hw->sta_data_size = sizeof(struct ath_node);
 	hw->vif_data_size = sizeof(struct ath_vif);
+	hw->extra_tx_headroom = 4;
 
 	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
 	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e55fa11..7fbf7f9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -209,21 +209,25 @@
 	ATH9K_PHYERR_OFDM_POWER_DROP      = 21,
 	ATH9K_PHYERR_OFDM_SERVICE         = 22,
 	ATH9K_PHYERR_OFDM_RESTART         = 23,
-	ATH9K_PHYERR_FALSE_RADAR_EXT      = 24,
 
+	ATH9K_PHYERR_CCK_BLOCKER          = 24,
 	ATH9K_PHYERR_CCK_TIMING           = 25,
 	ATH9K_PHYERR_CCK_HEADER_CRC       = 26,
 	ATH9K_PHYERR_CCK_RATE_ILLEGAL     = 27,
+	ATH9K_PHYERR_CCK_LENGTH_ILLEGAL   = 28,
+	ATH9K_PHYERR_CCK_POWER_DROP       = 29,
 	ATH9K_PHYERR_CCK_SERVICE          = 30,
 	ATH9K_PHYERR_CCK_RESTART          = 31,
-	ATH9K_PHYERR_CCK_LENGTH_ILLEGAL   = 32,
-	ATH9K_PHYERR_CCK_POWER_DROP       = 33,
 
-	ATH9K_PHYERR_HT_CRC_ERROR         = 34,
-	ATH9K_PHYERR_HT_LENGTH_ILLEGAL    = 35,
-	ATH9K_PHYERR_HT_RATE_ILLEGAL      = 36,
+	ATH9K_PHYERR_HT_CRC_ERROR         = 32,
+	ATH9K_PHYERR_HT_LENGTH_ILLEGAL    = 33,
+	ATH9K_PHYERR_HT_RATE_ILLEGAL      = 34,
+	ATH9K_PHYERR_HT_ZLF               = 35,
 
-	ATH9K_PHYERR_SPECTRAL		  = 38,
+	ATH9K_PHYERR_FALSE_RADAR_EXT      = 36,
+	ATH9K_PHYERR_GREEN_FIELD          = 37,
+	ATH9K_PHYERR_SPECTRAL             = 38,
+
 	ATH9K_PHYERR_MAX                  = 39,
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c27143b..d184e68 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -938,6 +938,9 @@
 		if (avp->assoc && !iter_data->primary_sta)
 			iter_data->primary_sta = vif;
 		break;
+	case NL80211_IFTYPE_OCB:
+		iter_data->nocbs++;
+		break;
 	case NL80211_IFTYPE_ADHOC:
 		iter_data->nadhocs++;
 		if (vif->bss_conf.enable_beacon)
@@ -1111,6 +1114,8 @@
 
 		if (iter_data.nmeshes)
 			ah->opmode = NL80211_IFTYPE_MESH_POINT;
+		else if (iter_data.nocbs)
+			ah->opmode = NL80211_IFTYPE_OCB;
 		else if (iter_data.nwds)
 			ah->opmode = NL80211_IFTYPE_AP;
 		else if (iter_data.nadhocs)
@@ -1760,7 +1765,8 @@
 		ath9k_calculate_summary_state(sc, avp->chanctx);
 	}
 
-	if (changed & BSS_CHANGED_IBSS) {
+	if ((changed & BSS_CHANGED_IBSS) ||
+	      (changed & BSS_CHANGED_OCB)) {
 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
 		common->curaid = bss_conf->aid;
 		ath9k_hw_write_associd(sc->sc_ah);
@@ -1856,7 +1862,7 @@
 			      struct ieee80211_vif *vif,
 			      enum ieee80211_ampdu_mlme_action action,
 			      struct ieee80211_sta *sta,
-			      u16 tid, u16 *ssn, u8 buf_size)
+			      u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index d3189da..994daf6 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -403,7 +403,7 @@
 	    (sc->cur_chan->nvifs <= 1) &&
 	    !(sc->cur_chan->rxfilter & FIF_BCN_PRBRESP_PROMISC))
 		rfilt |= ATH9K_RX_FILTER_MYBEACON;
-	else
+	else if (sc->sc_ah->opmode != NL80211_IFTYPE_OCB)
 		rfilt |= ATH9K_RX_FILTER_BEACON;
 
 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 170c209..19d3d64 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1415,7 +1415,7 @@
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
 				    struct ieee80211_sta *sta,
-				    u16 tid, u16 *ssn, u8 buf_size)
+				    u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct ar9170 *ar = hw->priv;
 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
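
The added bool amsdu parameter, which appears in every driver touched here, reflects a mac80211 callback change: ampdu_action now tells the driver whether the peer can receive A-MSDUs inside an A-MPDU. A driver with no special handling can accept the flag and ignore it; a minimal sketch under that assumption (foo_ is a placeholder name):

	static int foo_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
	{
		switch (action) {
		case IEEE80211_AMPDU_RX_START:
		case IEEE80211_AMPDU_RX_STOP:
			return 0;	/* amsdu deliberately unused */
		default:
			return -EOPNOTSUPP;
		}
	}
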
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 924135b..d66533c 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -453,7 +453,7 @@
 	/* post-process RSSI */
 	for (i = 0; i < 7; i++)
 		if (phy->rssi[i] & 0x80)
-			phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
+			phy->rssi[i] = ((~phy->rssi[i] & 0x7f) + 1) & 0x7f;
 
 	/* TODO: we could do something with phy_errors */
 	status->signal = ar->noise[0] + phy->rssi_combined;
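
The old expression dropped the bitwise NOT, so a negative 7-bit two's-complement RSSI sample was not converted to its magnitude. Worked example:

	/* raw sample 0xf0 encodes -16 in 7-bit two's complement:
	 *   fixed: ((~0xf0 & 0x7f) + 1) & 0x7f = (0x0f + 1) & 0x7f = 0x10 (16)
	 *   old:   (( 0xf0 & 0x7f) + 1) & 0x7f = (0x70 + 1) & 0x7f = 0x71 (bogus)
	 */
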
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 656ce42..2303ef9 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -21,12 +21,6 @@
 #include "dfs_pri_detector.h"
 #include "ath.h"
 
-/*
- * tolerated deviation of radar time stamp in usecs on both sides
- * TODO: this might need to be HW-dependent
- */
-#define PRI_TOLERANCE	16
-
 /**
  * struct radar_types - contains array of patterns defined for one DFS domain
  * @domain: DFS regulatory domain
@@ -121,7 +115,7 @@
 	JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
 	JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
 	JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
-	JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+	JP_PATTERN(7, 50, 100, 1000, 2000, 1, 3, 50, false),
 	JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
 };
 
@@ -290,10 +284,10 @@
 	if (cd == NULL)
 		return false;
 
-	dpd->last_pulse_ts = event->ts;
 	/* reset detector on time stamp wraparound, caused by TSF reset */
 	if (event->ts < dpd->last_pulse_ts)
 		dpd_reset(dpd);
+	dpd->last_pulse_ts = event->ts;
 
 	/* do type individual pattern matching */
 	for (i = 0; i < dpd->num_radar_types; i++) {
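
Moving the store below the check matters: the old code overwrote last_pulse_ts first, so the wraparound test compared the new timestamp against itself and could never trigger. With assumed values:

	/* TSF reset: last_pulse_ts = 0xffffff00, next event->ts = 0x10
	 *   before: last_pulse_ts = 0x10, then 0x10 < 0x10 -> no reset, ever
	 *   after:  0x10 < 0xffffff00 -> dpd_reset(), then store 0x10
	 */
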
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h
index 25a43d6..92be353 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.h
@@ -21,6 +21,11 @@
 #include <linux/list.h>
 #include <linux/nl80211.h>
 
+/* tolerated deviation of radar time stamp in usecs on both sides
+ * TODO: this might need to be HW-dependent
+ */
+#define PRI_TOLERANCE	16
+
 /**
  * struct ath_dfs_pool_stats - DFS Statistics for global pools
  */
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index cc5c592..05b0464 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -25,6 +25,9 @@
 
 #define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
 #define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
+#define GET_PRI_TO_USE(MIN, MAX, RUNTIME) \
+	(MIN + PRI_TOLERANCE == MAX - PRI_TOLERANCE ? \
+	MIN + PRI_TOLERANCE : RUNTIME)
 
 /**
  * struct pulse_elem - elements in pulse queue
@@ -243,7 +246,8 @@
 		ps.count_falses = 0;
 		ps.first_ts = p->ts;
 		ps.last_ts = ts;
-		ps.pri = ts - p->ts;
+		ps.pri = GET_PRI_TO_USE(pde->rs->pri_min,
+			pde->rs->pri_max, ts - p->ts);
 		ps.dur = ps.pri * (pde->rs->ppb - 1)
 				+ 2 * pde->rs->max_pri_tolerance;
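
GET_PRI_TO_USE pins the candidate sequence's PRI to the nominal value whenever the pattern's tolerance-adjusted bounds collapse to a single point, and otherwise keeps the measured pulse spacing. With illustrative numbers:

	/* PRI_TOLERANCE = 16; a pattern declared with pri_min = 317,
	 * pri_max = 349: 317 + 16 == 349 - 16 == 333, so the detector
	 * locks to the nominal 333. A wider pattern (say 200..500) keeps
	 * RUNTIME, i.e. the measured ts - p->ts.
	 */
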
 
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 086549b..f8dfa05 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -79,6 +79,7 @@
 	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
 	int i;
 
+	spin_lock_init(&ch->lock);
 	for (i = 0; i < ch->desc_num; i++) {
 		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
 		if (!cur_ctl)
@@ -169,7 +170,7 @@
 	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
 }
 
-static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
 {
 	struct wcn36xx_dxe_desc *cur_dxe = NULL;
 	struct wcn36xx_dxe_desc *prev_dxe = NULL;
@@ -178,7 +179,7 @@
 	int i;
 
 	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
-	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
 					      GFP_KERNEL);
 	if (!wcn_ch->cpu_addr)
 		return -ENOMEM;
@@ -270,7 +271,7 @@
 	return 0;
 }
 
-static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
 {
 	struct wcn36xx_dxe_desc *dxe = ctl->desc;
 	struct sk_buff *skb;
@@ -279,7 +280,7 @@
 	if (skb == NULL)
 		return -ENOMEM;
 
-	dxe->dst_addr_l = dma_map_single(NULL,
+	dxe->dst_addr_l = dma_map_single(dev,
 					 skb_tail_pointer(skb),
 					 WCN36XX_PKT_SIZE,
 					 DMA_FROM_DEVICE);
@@ -297,7 +298,7 @@
 	cur_ctl = wcn_ch->head_blk_ctl;
 
 	for (i = 0; i < wcn_ch->desc_num; i++) {
-		wcn36xx_dxe_fill_skb(cur_ctl);
+		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
 		cur_ctl = cur_ctl->next;
 	}
 
@@ -345,7 +346,7 @@
 
 static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 {
-	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+	struct wcn36xx_dxe_ctl *ctl;
 	struct ieee80211_tx_info *info;
 	unsigned long flags;
 
@@ -354,23 +355,25 @@
 	 * completely full head and tail are pointing to the same element
 	 * and the do-while loop will not make any iterations.
 	 */
+	spin_lock_irqsave(&ch->lock, flags);
+	ctl = ch->tail_blk_ctl;
 	do {
 		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
 			break;
 		if (ctl->skb) {
-			dma_unmap_single(NULL, ctl->desc->src_addr_l,
+			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 					 ctl->skb->len, DMA_TO_DEVICE);
 			info = IEEE80211_SKB_CB(ctl->skb);
 			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
 				/* Keep frame until TX status comes */
 				ieee80211_free_txskb(wcn->hw, ctl->skb);
 			}
-			spin_lock_irqsave(&ctl->skb_lock, flags);
+			spin_lock(&ctl->skb_lock);
 			if (wcn->queues_stopped) {
 				wcn->queues_stopped = false;
 				ieee80211_wake_queues(wcn->hw);
 			}
-			spin_unlock_irqrestore(&ctl->skb_lock, flags);
+			spin_unlock(&ctl->skb_lock);
 
 			ctl->skb = NULL;
 		}
@@ -379,6 +382,7 @@
 	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
 
 	ch->tail_blk_ctl = ctl;
+	spin_unlock_irqrestore(&ch->lock, flags);
 }
 
 static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
@@ -474,7 +478,7 @@
 	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
 		skb = ctl->skb;
 		dma_addr = dxe->dst_addr_l;
-		wcn36xx_dxe_fill_skb(ctl);
+		wcn36xx_dxe_fill_skb(wcn->dev, ctl);
 
 		switch (ch->ch_type) {
 		case WCN36XX_DXE_CH_RX_L:
@@ -491,7 +495,7 @@
 			wcn36xx_warn("Unknown channel\n");
 		}
 
-		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+		dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
 				 DMA_FROM_DEVICE);
 		wcn36xx_rx_skb(wcn, skb);
 		ctl = ctl->next;
@@ -540,7 +544,7 @@
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
-	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
@@ -555,7 +559,7 @@
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
-	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
@@ -574,13 +578,13 @@
 void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
 {
 	if (wcn->mgmt_mem_pool.virt_addr)
-		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
 				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
 				  wcn->mgmt_mem_pool.virt_addr,
 				  wcn->mgmt_mem_pool.phy_addr);
 
 	if (wcn->data_mem_pool.virt_addr) {
-		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
 				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
 				  wcn->data_mem_pool.virt_addr,
 				  wcn->data_mem_pool.phy_addr);
@@ -596,12 +600,14 @@
 	struct wcn36xx_dxe_desc *desc = NULL;
 	struct wcn36xx_dxe_ch *ch = NULL;
 	unsigned long flags;
+	int ret;
 
 	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
 
+	spin_lock_irqsave(&ch->lock, flags);
 	ctl = ch->head_blk_ctl;
 
-	spin_lock_irqsave(&ctl->next->skb_lock, flags);
+	spin_lock(&ctl->next->skb_lock);
 
 	/*
 	 * If skb is not null that means that we reached the tail of the ring
@@ -611,10 +617,11 @@
 	if (NULL != ctl->next->skb) {
 		ieee80211_stop_queues(wcn->hw);
 		wcn->queues_stopped = true;
-		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+		spin_unlock(&ctl->next->skb_lock);
+		spin_unlock_irqrestore(&ch->lock, flags);
 		return -EBUSY;
 	}
-	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+	spin_unlock(&ctl->next->skb_lock);
 
 	ctl->skb = NULL;
 	desc = ctl->desc;
@@ -640,10 +647,11 @@
 	desc = ctl->desc;
 	if (ctl->bd_cpu_addr) {
 		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	desc->src_addr_l = dma_map_single(NULL,
+	desc->src_addr_l = dma_map_single(wcn->dev,
 					  ctl->skb->data,
 					  ctl->skb->len,
 					  DMA_TO_DEVICE);
@@ -679,7 +687,10 @@
 			ch->reg_ctrl, ch->def_ctrl);
 	}
 
-	return 0;
+	ret = 0;
+unlock:
+	spin_unlock_irqrestore(&ch->lock, flags);
+	return ret;
 }
 
 int wcn36xx_dxe_init(struct wcn36xx *wcn)
@@ -696,7 +707,7 @@
 	/***************************************/
 	/* Init descriptors for TX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -714,7 +725,7 @@
 	/***************************************/
 	/* Init descriptors for TX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -734,7 +745,7 @@
 	/***************************************/
 	/* Init descriptors for RX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
 
 	/* For RX we need to preallocate buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -764,7 +775,7 @@
 	/***************************************/
 	/* Init descriptors for RX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
 
 	/* For RX we need to preallocate buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
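
All of these calls previously passed a NULL struct device to the DMA API; mappings must name the device doing the DMA so the proper DMA mask, offsets and IOMMU ops apply. The pattern, sketched with the driver's own device pointer:

	/* allocate and free a ring against the same DMA-capable device */
	dma_addr_t pa;
	void *va = dma_alloc_coherent(wcn->dev, size, &pa, GFP_KERNEL);
	if (va) {
		/* ... use the ring ... */
		dma_free_coherent(wcn->dev, size, va, pa);
	}
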
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index 35ee7e9..3eca4f9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -243,6 +243,7 @@
 };
 
 struct wcn36xx_dxe_ch {
+	spinlock_t			lock;	/* protects head/tail ptrs */
 	enum wcn36xx_dxe_ch_type	ch_type;
 	void				*cpu_addr;
 	dma_addr_t			dma_addr;
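
The new per-channel lock serializes the head/tail block pointers between the TX submit path and the completion reaper. Note the nesting used above: the outer lock already disables interrupts, so the inner skb_lock drops to a plain spin_lock(), and both paths take ch->lock first, keeping the lock order consistent:

	spin_lock_irqsave(&ch->lock, flags);	/* outer: IRQs off once */
	spin_lock(&ctl->skb_lock);		/* inner nests, IRQs already off */
	/* ... manipulate ring state ... */
	spin_unlock(&ctl->skb_lock);
	spin_unlock_irqrestore(&ch->lock, flags);
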
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 900e72a..7c169ab 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -859,7 +859,7 @@
 		    struct ieee80211_vif *vif,
 		    enum ieee80211_ampdu_mlme_action action,
 		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		    u8 buf_size)
+		    u8 buf_size, bool amsdu)
 {
 	struct wcn36xx *wcn = hw->priv;
 	struct wcn36xx_sta *sta_priv = NULL;
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index ce8c038..6dfedc8 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -1,5 +1,6 @@
 config WIL6210
 	tristate "Wilocity 60g WiFi card wil6210 support"
+	select WANT_DEV_COREDUMP
 	depends on CFG80211
 	depends on PCI
 	default n
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 64b4326..fdf63d5 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -17,6 +17,7 @@
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
+wil6210-y += wil_crash_dump.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index d1a1e16..97bc186 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1373,6 +1373,12 @@
 				}
 			}
 			spin_unlock_bh(&p->tid_rx_lock);
+			seq_printf(s,
+				   "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+				   p->stats.rx_non_data_frame,
+				   p->stats.rx_short_frame,
+				   p->stats.rx_large_frame);
+
 			seq_puts(s, "Rx/MCS:");
 			for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
 			     mcs++)
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index a371f036..50c136e 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@
 
 		isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
 			 BIT_DMA_EP_RX_ICR_RX_HTRSH);
-		if (likely(test_bit(wil_status_reset_done, wil->status))) {
+		if (likely(test_bit(wil_status_fwready, wil->status))) {
 			if (likely(test_bit(wil_status_napi_en, wil->status))) {
 				wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
 				need_unmask = false;
@@ -286,7 +286,7 @@
 		isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
 		/* clear also all VRING interrupts */
 		isr &= ~(BIT(25) - 1UL);
-		if (likely(test_bit(wil_status_reset_done, wil->status))) {
+		if (likely(test_bit(wil_status_fwready, wil->status))) {
 			wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
 			need_unmask = false;
 			napi_schedule(&wil->napi_tx);
@@ -347,7 +347,12 @@
 	wil6210_mask_irq_misc(wil);
 
 	if (isr & ISR_MISC_FW_ERROR) {
-		wil_err(wil, "Firmware error detected\n");
+		u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
+		u32 ucode_assert_code = wil_r(wil, RGF_UCODE_ASSERT_CODE);
+
+		wil_err(wil,
+			"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
+			fw_assert_code, ucode_assert_code);
 		clear_bit(wil_status_fwready, wil->status);
 		/*
 		 * do not clear @isr here - we do 2-nd part in thread
@@ -359,7 +364,7 @@
 	if (isr & ISR_MISC_FW_READY) {
 		wil_dbg_irq(wil, "IRQ: FW ready\n");
 		wil_cache_mbox_regs(wil);
-		set_bit(wil_status_reset_done, wil->status);
+		set_bit(wil_status_mbox_ready, wil->status);
 		/**
 		 * Actual FW ready indicated by the
 		 * WMI_FW_READY_EVENTID
@@ -386,6 +391,7 @@
 	wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
 
 	if (isr & ISR_MISC_FW_ERROR) {
+		wil_fw_core_dump(wil);
 		wil_notify_fw_error(wil);
 		isr &= ~ISR_MISC_FW_ERROR;
 		wil_fw_error_recovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 2fb04c5..bb69a59 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -203,11 +203,13 @@
 	 * - disconnect single STA, already disconnected
 	 * - disconnect all
 	 *
-	 * For "disconnect all", there are 2 options:
+	 * For "disconnect all", there are 3 options:
 	 * - bssid == NULL
+	 * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
 	 * - bssid is our MAC address
 	 */
-	if (bssid && memcmp(ndev->dev_addr, bssid, ETH_ALEN)) {
+	if (bssid && !is_broadcast_ether_addr(bssid) &&
+	    !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
 		cid = wil_find_cid(wil, bssid);
 		wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
 			     bssid, cid, reason_code);
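
The rewritten condition recognizes three "disconnect all" spellings of bssid. The same predicate as a hedged helper (is_disconnect_all is illustrative; the etherdevice.h helpers are the ones used above):

	#include <linux/etherdevice.h>

	static bool is_disconnect_all(const u8 *own_addr, const u8 *bssid)
	{
		return !bssid || is_broadcast_ether_addr(bssid) ||
		       ether_addr_equal_unaligned(own_addr, bssid);
	}
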
@@ -420,7 +422,7 @@
 		wil->sta[cid].status = wil_sta_connected;
 		netif_tx_wake_all_queues(ndev);
 	} else {
-		wil->sta[cid].status = wil_sta_unused;
+		wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true);
 	}
 }
 
@@ -765,6 +767,8 @@
 	if (wil->hw_version == HW_VER_UNKNOWN)
 		return -ENODEV;
 
+	set_bit(wil_status_resetting, wil->status);
+
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
 	wil_bcast_fini(wil);
@@ -851,6 +855,12 @@
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
 	wil_dbg_misc(wil, "starting fw error recovery\n");
+
+	if (test_bit(wil_status_resetting, wil->status)) {
+		wil_info(wil, "Reset already in progress\n");
+		return;
+	}
+
 	wil->recovery_state = fw_recovery_pending;
 	schedule_work(&wil->fw_error_worker);
 }
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index feff1ef..1a3142c3 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -260,6 +260,7 @@
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
 #ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
 static int wil6210_suspend(struct device *dev, bool is_runtime)
 {
@@ -307,7 +308,6 @@
 	return rc;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static int wil6210_pm_suspend(struct device *dev)
 {
 	return wil6210_suspend(dev, false);
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 8a8cdc6..5ca0307 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -110,7 +110,7 @@
 	 */
 	for (i = 0; i < num_descriptors; i++) {
 		struct vring_tx_desc *_d = &pmc->pring_va[i];
-		struct vring_tx_desc dd, *d = &dd;
+		struct vring_tx_desc dd = {}, *d = &dd;
 		int j = 0;
 
 		pmc->descriptors[i].va = dma_alloc_coherent(dev,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 9238c1a..e3d1be8 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -205,6 +205,32 @@
 	spin_unlock(&sta->tid_rx_lock);
 }
 
+/* process BAR frame, called in NAPI context */
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq)
+{
+	struct wil_sta_info *sta = &wil->sta[cid];
+	struct wil_tid_ampdu_rx *r;
+
+	spin_lock(&sta->tid_rx_lock);
+
+	r = sta->tid_rx[tid];
+	if (!r) {
+		wil_err(wil, "BAR for non-existent CID %d TID %d\n", cid, tid);
+		goto out;
+	}
+	if (seq_less(seq, r->head_seq_num)) {
+		wil_err(wil, "BAR Seq 0x%03x preceding head 0x%03x\n",
+			seq, r->head_seq_num);
+		goto out;
+	}
+	wil_dbg_txrx(wil, "BAR: CID %d TID %d Seq 0x%03x head 0x%03x\n",
+		     cid, tid, seq, r->head_seq_num);
+	wil_release_reorder_frames(wil, r, seq);
+
+out:
+	spin_unlock(&sta->tid_rx_lock);
+}
+
 struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
 						int size, u16 ssn)
 {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6229110..3bc9bc0 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -358,6 +358,13 @@
 	}
 }
 
+/* similar to the ieee80211_ version, but FC contains only the 1st byte */
+static inline int wil_is_back_req(u8 fc)
+{
+	return (fc & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+	       (IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
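
Here fc1 is the first FC byte (wil_rxdesc_fc1(), added in txrx.h below, rebuilds it from descriptor bits 10..15, shifted so the type/subtype masks line up). Worked example of the match:

	/* a BlockAckReq has fc1 = 0x84:
	 *   fc1 & IEEE80211_FCTL_FTYPE (0x0c) == IEEE80211_FTYPE_CTL      (0x04)
	 *   fc1 & IEEE80211_FCTL_STYPE (0xf0) == IEEE80211_STYPE_BACK_REQ (0x80)
	 * a plain BlockAck (fc1 = 0x94, subtype 0x90) does not match
	 */
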
+
 /**
  * reap 1 frame from @swhead
  *
@@ -379,14 +386,16 @@
 	u16 dmalen;
 	u8 ftype;
 	int cid;
-	int i = (int)vring->swhead;
+	int i;
 	struct wil_net_stats *stats;
 
 	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
+again:
 	if (unlikely(wil_vring_is_empty(vring)))
 		return NULL;
 
+	i = (int)vring->swhead;
 	_d = &vring->va[i].rx;
 	if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) {
 		/* it is not error, we just reached end of Rx done area */
@@ -398,7 +407,7 @@
 	wil_vring_advance_head(vring, 1);
 	if (!skb) {
 		wil_err(wil, "No Rx skb at [%d]\n", i);
-		return NULL;
+		goto again;
 	}
 	d = wil_skb_rxdesc(skb);
 	*d = *_d;
@@ -409,13 +418,17 @@
 
 	trace_wil6210_rx(i, d);
 	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen);
-	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
+	wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
 			  (const void *)d, sizeof(*d), false);
 
+	cid = wil_rxdesc_cid(d);
+	stats = &wil->sta[cid].stats;
+
 	if (unlikely(dmalen > sz)) {
 		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
+		stats->rx_large_frame++;
 		kfree_skb(skb);
-		return NULL;
+		goto again;
 	}
 	skb_trim(skb, dmalen);
 
@@ -424,8 +437,6 @@
 	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
 			  skb->data, skb_headlen(skb), false);
 
-	cid = wil_rxdesc_cid(d);
-	stats = &wil->sta[cid].stats;
 	stats->last_mcs_rx = wil_rxdesc_mcs(d);
 	if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
 		stats->rx_per_mcs[stats->last_mcs_rx]++;
@@ -437,24 +448,47 @@
 	/* no extra checks if in sniffer mode */
 	if (ndev->type != ARPHRD_ETHER)
 		return skb;
-	/*
-	 * Non-data frames may be delivered through Rx DMA channel (ex: BAR)
+	/* Non-data frames may be delivered through Rx DMA channel (ex: BAR)
 	 * Driver should recognize it by frame type, that is found
 	 * in Rx descriptor. If type is not data, it is 802.11 frame as is
 	 */
 	ftype = wil_rxdesc_ftype(d) << 2;
 	if (unlikely(ftype != IEEE80211_FTYPE_DATA)) {
-		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
-		/* TODO: process it */
+		u8 fc1 = wil_rxdesc_fc1(d);
+		int mid = wil_rxdesc_mid(d);
+		int tid = wil_rxdesc_tid(d);
+		u16 seq = wil_rxdesc_seq(d);
+
+		wil_dbg_txrx(wil,
+			     "Non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+			     fc1, mid, cid, tid, seq);
+		stats->rx_non_data_frame++;
+		if (wil_is_back_req(fc1)) {
+			wil_dbg_txrx(wil,
+				     "BAR: MID %d CID %d TID %d Seq 0x%03x\n",
+				     mid, cid, tid, seq);
+			wil_rx_bar(wil, cid, tid, seq);
+		} else {
+			/* print all the info again. One can enable only this
+			 * without the overhead of printing every Rx frame
+			 */
+			wil_dbg_txrx(wil,
+				     "Unhandled non-data frame FC[7:0] 0x%02x MID %d CID %d TID %d Seq 0x%03x\n",
+				     fc1, mid, cid, tid, seq);
+			wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4,
+					  (const void *)d, sizeof(*d), false);
+			wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
+					  skb->data, skb_headlen(skb), false);
+		}
 		kfree_skb(skb);
-		return NULL;
+		goto again;
 	}
 
 	if (unlikely(skb->len < ETH_HLEN + snaplen)) {
 		wil_err(wil, "Short frame, len = %d\n", skb->len);
-		/* TODO: process it (i.e. BAR) */
+		stats->rx_short_frame++;
 		kfree_skb(skb);
-		return NULL;
+		goto again;
 	}
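
Replacing the early return NULL exits with goto again changes the reap semantics: a malformed frame now consumes its descriptor and the next one is tried, instead of ending the whole NAPI pass at the first bad frame. The resulting loop shape, sketched (bad_frame stands for the checks above):

	again:
		if (wil_vring_is_empty(vring))
			return NULL;	/* only an empty ring ends the reap */
		/* ... fetch descriptor, build skb ... */
		if (bad_frame) {	/* missing skb, oversized, short, non-data */
			kfree_skb(skb);
			goto again;
		}
		return skb;
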
 
 	/* L4 IDENT is on when HW calculated checksum, check status
@@ -1208,6 +1242,7 @@
 	int tcp_hdr_len;
 	int skb_net_hdr_len;
 	int gso_type;
+	int rc = -EINVAL;
 
 	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
 		     __func__, skb->len, vring_index);
@@ -1299,8 +1334,9 @@
 				     len, rem_data, descs_used);
 
 			if (descs_used == avail)  {
-				wil_err(wil, "TSO: ring overflow\n");
-				goto dma_error;
+				wil_err_ratelimited(wil, "TSO: ring overflow\n");
+				rc = -ENOMEM;
+				goto mem_error;
 			}
 
 			lenmss = min_t(int, rem_data, len);
@@ -1322,8 +1358,10 @@
 				headlen -= lenmss;
 			}
 
-			if (unlikely(dma_mapping_error(dev, pa)))
-				goto dma_error;
+			if (unlikely(dma_mapping_error(dev, pa))) {
+				wil_err(wil, "TSO: DMA map page error\n");
+				goto mem_error;
+			}
 
 			_desc = &vring->va[i].tx;
 
@@ -1422,8 +1460,8 @@
 	}
 
 	/* advance swhead */
-	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
 	wil_vring_advance_head(vring, descs_used);
+	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
 
 	/* make sure all writes to descriptors (shared memory) are done before
 	 * committing them to HW
@@ -1433,8 +1471,7 @@
 	wil_w(wil, vring->hwtail, vring->swhead);
 	return 0;
 
-dma_error:
-	wil_err(wil, "TSO: DMA map page error\n");
+mem_error:
 	while (descs_used > 0) {
 		struct wil_ctx *ctx;
 
@@ -1445,14 +1482,11 @@
 		_desc->dma.status = TX_DMA_STATUS_DU;
 		ctx = &vring->ctx[i];
 		wil_txdesc_unmap(dev, d, ctx);
-		if (ctx->skb)
-			dev_kfree_skb_any(ctx->skb);
 		memset(ctx, 0, sizeof(*ctx));
 		descs_used--;
 	}
-
 err_exit:
-	return -EINVAL;
+	return rc;
 }
 
 static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
@@ -1528,8 +1562,11 @@
 		_d = &vring->va[i].tx;
 		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, pa)))
+		if (unlikely(dma_mapping_error(dev, pa))) {
+			wil_err(wil, "Tx[%2d] failed to map fragment\n",
+				vring_index);
 			goto dma_error;
+		}
 		vring->ctx[i].mapped_as = wil_mapped_as_page;
 		wil_tx_desc_map(d, pa, len, vring_index);
 		/* no need to check return code -
@@ -1589,9 +1626,6 @@
 		_d->dma.status = TX_DMA_STATUS_DU;
 		wil_txdesc_unmap(dev, d, ctx);
 
-		if (ctx->skb)
-			dev_kfree_skb_any(ctx->skb);
-
 		memset(ctx, 0, sizeof(*ctx));
 	}
 
@@ -1633,7 +1667,7 @@
 		goto drop;
 	}
 	if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
-		wil_err(wil, "FW not connected\n");
+		wil_err_ratelimited(wil, "FW not connected\n");
 		goto drop;
 	}
 	if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 82a8f9a..ee7c7b4 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -464,6 +464,12 @@
 	return WIL_GET_BITS(d->mac.d0, 12, 15);
 }
 
+/* 1-st byte (with frame type/subtype) of FC field */
+static inline u8 wil_rxdesc_fc1(struct vring_rx_desc *d)
+{
+	return (u8)(WIL_GET_BITS(d->mac.d0, 10, 15) << 2);
+}
+
 static inline int wil_rxdesc_seq(struct vring_rx_desc *d)
 {
 	return WIL_GET_BITS(d->mac.d0, 16, 27);
@@ -501,6 +507,7 @@
 
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
 void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+void wil_rx_bar(struct wil6210_priv *wil, u8 cid, u8 tid, u16 seq);
 struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
 						int size, u16 ssn);
 void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index dd4ea92..ade5f3b8 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -246,6 +246,10 @@
 #define RGF_USER_JTAG_DEV_ID	(0x880b34) /* device ID */
 	#define JTAG_DEV_ID_SPARROW_B0	(0x2632072f)
 
+/* crash codes for FW/Ucode are stored here */
+#define RGF_FW_ASSERT_CODE		(0x91f020)
+#define RGF_UCODE_ASSERT_CODE		(0x91f028)
+
 enum {
 	HW_VER_UNKNOWN,
 	HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
@@ -398,13 +402,14 @@
 };
 
 enum { /* for wil6210_priv.status */
-	wil_status_fwready = 0,
+	wil_status_fwready = 0, /* FW operational */
 	wil_status_fwconnecting,
 	wil_status_fwconnected,
 	wil_status_dontscan,
-	wil_status_reset_done,
+	wil_status_mbox_ready, /* MBOX structures ready */
 	wil_status_irqen, /* FIXME: interrupts enabled - for debug */
 	wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
+	wil_status_resetting, /* reset in progress */
 	wil_status_last /* keep last */
 };
 
@@ -465,6 +470,9 @@
 	unsigned long	tx_bytes;
 	unsigned long	tx_errors;
 	unsigned long	rx_dropped;
+	unsigned long	rx_non_data_frame;
+	unsigned long	rx_short_frame;
+	unsigned long	rx_large_frame;
 	u16 last_mcs_rx;
 	u64 rx_per_mcs[WIL_MCS_MAX + 1];
 };
@@ -820,4 +828,6 @@
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_resume(struct wil6210_priv *wil, bool is_runtime);
 
+void wil_fw_core_dump(struct wil6210_priv *wil);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
new file mode 100644
index 0000000..7e70934
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include <linux/devcoredump.h>
+
+static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
+					u32 *out_dump_size, u32 *out_host_min)
+{
+	int i;
+	const struct fw_map *map;
+	u32 host_min, host_max, tmp_max;
+
+	if (!out_dump_size)
+		return -EINVAL;
+
+	/* calculate the total size of the unpacked crash dump */
+	BUILD_BUG_ON(ARRAY_SIZE(fw_mapping) == 0);
+	map = &fw_mapping[0];
+	host_min = map->host;
+	host_max = map->host + (map->to - map->from);
+
+	for (i = 1; i < ARRAY_SIZE(fw_mapping); i++) {
+		map = &fw_mapping[i];
+
+		if (map->host < host_min)
+			host_min = map->host;
+
+		tmp_max = map->host + (map->to - map->from);
+		if (tmp_max > host_max)
+			host_max = tmp_max;
+	}
+
+	*out_dump_size = host_max - host_min;
+	if (out_host_min)
+		*out_host_min = host_min;
+
+	return 0;
+}
+
+static int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest,
+				  u32 size)
+{
+	int i;
+	const struct fw_map *map;
+	void *data;
+	u32 host_min, dump_size, offset, len;
+
+	if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
+		wil_err(wil, "%s: failed to obtain crash dump size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dump_size > size) {
+		wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
+			__func__, dump_size, size);
+		return -EINVAL;
+	}
+
+	/* copy to crash dump area */
+	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
+		map = &fw_mapping[i];
+
+		data = (void * __force)wil->csr + HOSTADDR(map->host);
+		len = map->to - map->from;
+		offset = map->host - host_min;
+
+		wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
+			     __func__, fw_mapping[i].name, len, offset);
+
+		wil_memcpy_fromio_32((void * __force)(dest + offset),
+				     (const void __iomem * __force)data, len);
+	}
+
+	return 0;
+}
+
+void wil_fw_core_dump(struct wil6210_priv *wil)
+{
+	void *fw_dump_data;
+	u32 fw_dump_size;
+
+	if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
+		wil_err(wil, "%s: failed to get fw dump size\n", __func__);
+		return;
+	}
+
+	fw_dump_data = vzalloc(fw_dump_size);
+	if (!fw_dump_data)
+		return;
+
+	if (wil_fw_copy_crash_dump(wil, fw_dump_data, fw_dump_size)) {
+		vfree(fw_dump_data);
+		return;
+	}
+	/* fw_dump_data will be freed by the device coredump release
+	 * function after 5 min
+	 */
+	dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
+	wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
+		 fw_dump_size);
+}
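
Once dev_coredumpv() has queued the dump, userspace can typically read it back through the devcoredump class device (e.g. /sys/class/devcoredump/devcd*/data); writing to that file releases the dump early, and otherwise the framework discards it when the 5-minute timeout noted above expires.
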
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2f35d4c..6ed26ba 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -293,12 +293,6 @@
 	/* ignore MAC address, we already have it from the boot loader */
 	snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
 		 "%d", wil->fw_version);
-}
-
-static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
-			     int len)
-{
-	wil_dbg_wmi(wil, "WMI: got FW ready event\n");
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 	set_bit(wil_status_fwready, wil->status);
@@ -684,13 +678,22 @@
 	spin_unlock_bh(&sta->tid_rx_lock);
 }
 
+/**
+ * Some events are ignored on purpose and need not be reported as
+ * "unhandled events"
+ */
+static void wmi_evt_ignore(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	wil_dbg_wmi(wil, "Ignore event 0x%04x len %d\n", id, len);
+}
+
 static const struct {
 	int eventid;
 	void (*handler)(struct wil6210_priv *wil, int eventid,
 			void *data, int data_len);
 } wmi_evt_handlers[] = {
 	{WMI_READY_EVENTID,		wmi_evt_ready},
-	{WMI_FW_READY_EVENTID,		wmi_evt_fw_ready},
+	{WMI_FW_READY_EVENTID,			wmi_evt_ignore},
 	{WMI_RX_MGMT_PACKET_EVENTID,	wmi_evt_rx_mgmt},
 	{WMI_TX_MGMT_PACKET_EVENTID,		wmi_evt_tx_mgmt},
 	{WMI_SCAN_COMPLETE_EVENTID,	wmi_evt_scan_complete},
@@ -701,6 +704,7 @@
 	{WMI_RCP_ADDBA_REQ_EVENTID,	wmi_evt_addba_rx_req},
 	{WMI_DELBA_EVENTID,		wmi_evt_delba},
 	{WMI_VRING_EN_EVENTID,		wmi_evt_vring_en},
+	{WMI_DATA_PORT_OPEN_EVENTID,		wmi_evt_ignore},
 };
 
 /*
@@ -720,7 +724,7 @@
 	ulong flags;
 	unsigned n;
 
-	if (!test_bit(wil_status_reset_done, wil->status)) {
+	if (!test_bit(wil_status_mbox_ready, wil->status)) {
 		wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
 		return;
 	}
@@ -1120,7 +1124,7 @@
 			cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
 		cmd.sniffer_cfg.phy_support =
 			cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
-				    ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+				    ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS);
 	} else {
 		/* Initialize offload (in non-sniffer mode).
 		 * Linux IP stack always calculates IP checksum
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 759fb8d..fba8560 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -71,26 +71,6 @@
 	select SSB_DRIVER_PCICORE
 	default y
 
-config B43_PCMCIA
-	bool "Broadcom 43xx PCMCIA device support"
-	depends on B43 && B43_SSB && SSB_PCMCIAHOST_POSSIBLE
-	select SSB_PCMCIAHOST
-	---help---
-	  Broadcom 43xx PCMCIA device support.
-
-	  Support for 16bit PCMCIA devices.
-	  Please note that most PC-CARD devices are _NOT_ 16bit PCMCIA
-	  devices, but 32bit CardBUS devices. CardBUS devices are supported
-	  out of the box by b43.
-
-	  With this config option you can drive b43 cards in
-	  CompactFlash formfactor in a PCMCIA adaptor.
-	  CF b43 cards can sometimes be found in handheld PCs.
-
-	  It's safe to select Y here, even if you don't have a B43 PCMCIA device.
-
-	  If unsure, say N.
-
 config B43_SDIO
 	bool "Broadcom 43xx SDIO device support"
 	depends on B43 && B43_SSB && SSB_SDIOHOST_POSSIBLE
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index c624d4d..ddc4df4 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -21,7 +21,6 @@
 b43-y				+= rfkill.o
 b43-y				+= ppr.o
 b43-$(CONFIG_B43_LEDS)		+= leds.o
-b43-$(CONFIG_B43_PCMCIA)	+= pcmcia.o
 b43-$(CONFIG_B43_SDIO)		+= sdio.o
 b43-$(CONFIG_B43_DEBUG)		+= debugfs.o
 
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 2849070..ec013fb 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -56,7 +56,6 @@
 #include "sysfs.h"
 #include "xmit.h"
 #include "lo.h"
-#include "pcmcia.h"
 #include "sdio.h"
 #include <linux/mmc/sdio_func.h>
 
@@ -120,6 +119,7 @@
 #ifdef CONFIG_B43_BCMA
 static const struct bcma_device_id b43_bcma_tbl[] = {
 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS),
 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS),
@@ -5849,12 +5849,9 @@
 	int err;
 
 	b43_debugfs_init();
-	err = b43_pcmcia_init();
-	if (err)
-		goto err_dfs_exit;
 	err = b43_sdio_init();
 	if (err)
-		goto err_pcmcia_exit;
+		goto err_dfs_exit;
 #ifdef CONFIG_B43_BCMA
 	err = bcma_driver_register(&b43_bcma_driver);
 	if (err)
@@ -5877,8 +5874,6 @@
 err_sdio_exit:
 #endif
 	b43_sdio_exit();
-err_pcmcia_exit:
-	b43_pcmcia_exit();
 err_dfs_exit:
 	b43_debugfs_exit();
 	return err;
@@ -5893,7 +5888,6 @@
 	bcma_driver_unregister(&b43_bcma_driver);
 #endif
 	b43_sdio_exit();
-	b43_pcmcia_exit();
 	b43_debugfs_exit();
 }
 
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
deleted file mode 100644
index 55f2bd7..0000000
--- a/drivers/net/wireless/b43/pcmcia.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-
-  Broadcom B43 wireless driver
-
-  Copyright (c) 2007 Michael Buesch <m@bues.ch>
-
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 2 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program; see the file COPYING.  If not, write to
-  the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
-  Boston, MA 02110-1301, USA.
-
-*/
-
-#include "pcmcia.h"
-
-#include <linux/ssb/ssb.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <pcmcia/cistpl.h>
-#include <pcmcia/ciscode.h>
-#include <pcmcia/ds.h>
-#include <pcmcia/cisreg.h>
-
-
-static const struct pcmcia_device_id b43_pcmcia_tbl[] = {
-	PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
-	PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
-	PCMCIA_DEVICE_NULL,
-};
-
-MODULE_DEVICE_TABLE(pcmcia, b43_pcmcia_tbl);
-
-#ifdef CONFIG_PM
-static int b43_pcmcia_suspend(struct pcmcia_device *dev)
-{
-	struct ssb_bus *ssb = dev->priv;
-
-	return ssb_bus_suspend(ssb);
-}
-
-static int b43_pcmcia_resume(struct pcmcia_device *dev)
-{
-	struct ssb_bus *ssb = dev->priv;
-
-	return ssb_bus_resume(ssb);
-}
-#else /* CONFIG_PM */
-# define b43_pcmcia_suspend		NULL
-# define b43_pcmcia_resume		NULL
-#endif /* CONFIG_PM */
-
-static int b43_pcmcia_probe(struct pcmcia_device *dev)
-{
-	struct ssb_bus *ssb;
-	int err = -ENOMEM;
-	int res = 0;
-
-	ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
-	if (!ssb)
-		goto out_error;
-
-	err = -ENODEV;
-
-	dev->config_flags |= CONF_ENABLE_IRQ;
-
-	dev->resource[2]->flags |=  WIN_ENABLE | WIN_DATA_WIDTH_16 |
-			 WIN_USE_WAIT;
-	dev->resource[2]->start = 0;
-	dev->resource[2]->end = SSB_CORE_SIZE;
-	res = pcmcia_request_window(dev, dev->resource[2], 250);
-	if (res != 0)
-		goto err_kfree_ssb;
-
-	res = pcmcia_map_mem_page(dev, dev->resource[2], 0);
-	if (res != 0)
-		goto err_disable;
-
-	if (!dev->irq)
-		goto err_disable;
-
-	res = pcmcia_enable_device(dev);
-	if (res != 0)
-		goto err_disable;
-
-	err = ssb_bus_pcmciabus_register(ssb, dev, dev->resource[2]->start);
-	if (err)
-		goto err_disable;
-	dev->priv = ssb;
-
-	return 0;
-
-err_disable:
-	pcmcia_disable_device(dev);
-err_kfree_ssb:
-	kfree(ssb);
-out_error:
-	printk(KERN_ERR "b43-pcmcia: Initialization failed (%d, %d)\n",
-	       res, err);
-	return err;
-}
-
-static void b43_pcmcia_remove(struct pcmcia_device *dev)
-{
-	struct ssb_bus *ssb = dev->priv;
-
-	ssb_bus_unregister(ssb);
-	pcmcia_disable_device(dev);
-	kfree(ssb);
-	dev->priv = NULL;
-}
-
-static struct pcmcia_driver b43_pcmcia_driver = {
-	.owner		= THIS_MODULE,
-	.name		= "b43-pcmcia",
-	.id_table	= b43_pcmcia_tbl,
-	.probe		= b43_pcmcia_probe,
-	.remove		= b43_pcmcia_remove,
-	.suspend	= b43_pcmcia_suspend,
-	.resume		= b43_pcmcia_resume,
-};
-
-/*
- * These are not module init/exit functions!
- * The module_pcmcia_driver() helper cannot be used here.
- */
-int b43_pcmcia_init(void)
-{
-	return pcmcia_register_driver(&b43_pcmcia_driver);
-}
-
-void b43_pcmcia_exit(void)
-{
-	pcmcia_unregister_driver(&b43_pcmcia_driver);
-}
diff --git a/drivers/net/wireless/b43/pcmcia.h b/drivers/net/wireless/b43/pcmcia.h
deleted file mode 100644
index 85f120a..0000000
--- a/drivers/net/wireless/b43/pcmcia.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef B43_PCMCIA_H_
-#define B43_PCMCIA_H_
-
-#ifdef CONFIG_B43_PCMCIA
-
-int b43_pcmcia_init(void);
-void b43_pcmcia_exit(void);
-
-#else /* CONFIG_B43_PCMCIA */
-
-static inline int b43_pcmcia_init(void)
-{
-	return 0;
-}
-static inline void b43_pcmcia_exit(void)
-{
-}
-
-#endif /* CONFIG_B43_PCMCIA */
-#endif /* B43_PCMCIA_H_ */
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index fe3dc12..ab42b1f 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -82,5 +82,6 @@
 config BRCMDBG
 	bool "Broadcom driver debug functions"
 	depends on BRCMSMAC || BRCMFMAC
+	select WANT_DEV_COREDUMP
 	---help---
 	  Selecting this enables additional code for debug purposes.
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
index 8e0e91c..288c84e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.c
@@ -272,10 +272,11 @@
 }
 
 static int
-brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-			 struct sk_buff *pktbuf)
+brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
+			 struct sk_buff *pktbuf, struct brcmf_if **ifp)
 {
 	struct brcmf_proto_bcdc_header *h;
+	struct brcmf_if *tmp_if;
 
 	brcmf_dbg(BCDC, "Enter\n");
 
@@ -289,30 +290,21 @@
 	trace_brcmf_bcdchdr(pktbuf->data);
 	h = (struct brcmf_proto_bcdc_header *)(pktbuf->data);
 
-	*ifidx = BCDC_GET_IF_IDX(h);
-	if (*ifidx >= BRCMF_MAX_IFS) {
-		brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
+	tmp_if = brcmf_get_ifp(drvr, BCDC_GET_IF_IDX(h));
+	if (!tmp_if) {
+		brcmf_dbg(INFO, "no matching ifp found\n");
 		return -EBADE;
 	}
-	/* The ifidx is the idx to map to matching netdev/ifp. When receiving
-	 * events this is easy because it contains the bssidx which maps
-	 * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
-	 * bssidx 1 is used for p2p0 and no data can be received or
-	 * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
-	 */
-	if (*ifidx)
-		(*ifidx)++;
-
 	if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) !=
 	    BCDC_PROTO_VER) {
 		brcmf_err("%s: non-BCDC packet received, flags 0x%x\n",
-			  brcmf_ifname(drvr, *ifidx), h->flags);
+			  brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
 		return -EBADE;
 	}
 
 	if (h->flags & BCDC_FLAG_SUM_GOOD) {
 		brcmf_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n",
-			  brcmf_ifname(drvr, *ifidx), h->flags);
+			  brcmf_ifname(drvr, tmp_if->ifidx), h->flags);
 		pktbuf->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
@@ -320,12 +312,14 @@
 
 	skb_pull(pktbuf, BCDC_HEADER_LEN);
 	if (do_fws)
-		brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
+		brcmf_fws_hdrpull(tmp_if, h->data_offset << 2, pktbuf);
 	else
 		skb_pull(pktbuf, h->data_offset << 2);
 
 	if (pktbuf->len == 0)
 		return -ENODATA;
+
+	*ifp = tmp_if;
 	return 0;
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
index 0445163..4e33f96 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/btcoex.c
@@ -149,7 +149,7 @@
 static void brcmf_btcoex_boost_wifi(struct brcmf_btcoex_info *btci,
 				    bool trump_sco)
 {
-	struct brcmf_if *ifp = btci->cfg->pub->iflist[0];
+	struct brcmf_if *ifp = brcmf_get_ifp(btci->cfg->pub, 0);
 
 	if (trump_sco && !btci->saved_regs_part2) {
 		/* this should reduce eSCO aggressive
@@ -468,7 +468,7 @@
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_priv(vif->wdev.wiphy);
 	struct brcmf_btcoex_info *btci = cfg->btcoex;
-	struct brcmf_if *ifp = cfg->pub->iflist[0];
+	struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0);
 
 	switch (mode) {
 	case BRCMF_BTCOEX_DISABLED:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
index 89e6a4d..230cad7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bus.h
@@ -65,6 +65,8 @@
  * @rxctl: receive a control response message from dongle.
  * @gettxq: obtain a reference of bus transmit queue (optional).
  * @wowl_config: specify if dongle is configured for wowl when going to suspend
+ * @get_ramsize: obtain size of device memory.
+ * @get_memdump: obtain device memory dump in provided buffer.
  *
  * This structure provides an abstract interface towards the
  * bus specific driver. For control messages to common driver
@@ -79,6 +81,8 @@
 	int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
 	struct pktq * (*gettxq)(struct device *dev);
 	void (*wowl_config)(struct device *dev, bool enabled);
+	size_t (*get_ramsize)(struct device *dev);
+	int (*get_memdump)(struct device *dev, void *data, size_t len);
 };
 
 
@@ -185,6 +189,23 @@
 		bus->ops->wowl_config(bus->dev, enabled);
 }
 
+static inline size_t brcmf_bus_get_ramsize(struct brcmf_bus *bus)
+{
+	if (!bus->ops->get_ramsize)
+		return 0;
+
+	return bus->ops->get_ramsize(bus->dev);
+}
+
+static inline
+int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
+{
+	if (!bus->ops->get_memdump)
+		return -EOPNOTSUPP;
+
+	return bus->ops->get_memdump(bus->dev, data, len);
+}
+
 /*
  * interface functions from common layer
  */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index a293275..deb5f78 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -236,89 +236,6 @@
 module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
 MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
 
-/* Quarter dBm units to mW
- * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
- * Table is offset so the last entry is largest mW value that fits in
- * a u16.
- */
-
-#define QDBM_OFFSET 153		/* Offset for first entry */
-#define QDBM_TABLE_LEN 40	/* Table size */
-
-/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET.
- * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2
- */
-#define QDBM_TABLE_LOW_BOUND 6493	/* Low bound */
-
-/* Largest mW value that will round down to the last table entry,
- * QDBM_OFFSET + QDBM_TABLE_LEN-1.
- * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) +
- * mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2.
- */
-#define QDBM_TABLE_HIGH_BOUND 64938	/* High bound */
-
-static const u16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = {
-/* qdBm:	+0	+1	+2	+3	+4	+5	+6	+7 */
-/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000,
-/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849,
-/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119,
-/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811,
-/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096
-};
-
-static u16 brcmf_qdbm_to_mw(u8 qdbm)
-{
-	uint factor = 1;
-	int idx = qdbm - QDBM_OFFSET;
-
-	if (idx >= QDBM_TABLE_LEN)
-		/* clamp to max u16 mW value */
-		return 0xFFFF;
-
-	/* scale the qdBm index up to the range of the table 0-40
-	 * where an offset of 40 qdBm equals a factor of 10 mW.
-	 */
-	while (idx < 0) {
-		idx += 40;
-		factor *= 10;
-	}
-
-	/* return the mW value scaled down to the correct factor of 10,
-	 * adding in factor/2 to get proper rounding.
-	 */
-	return (nqdBm_to_mW_map[idx] + factor / 2) / factor;
-}
-
-static u8 brcmf_mw_to_qdbm(u16 mw)
-{
-	u8 qdbm;
-	int offset;
-	uint mw_uint = mw;
-	uint boundary;
-
-	/* handle boundary case */
-	if (mw_uint <= 1)
-		return 0;
-
-	offset = QDBM_OFFSET;
-
-	/* move mw into the range of the table */
-	while (mw_uint < QDBM_TABLE_LOW_BOUND) {
-		mw_uint *= 10;
-		offset -= 40;
-	}
-
-	for (qdbm = 0; qdbm < QDBM_TABLE_LEN - 1; qdbm++) {
-		boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm + 1] -
-						    nqdBm_to_mW_map[qdbm]) / 2;
-		if (mw_uint < boundary)
-			break;
-	}
-
-	qdbm += (u8) offset;
-
-	return qdbm;
-}
 
 static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
 			       struct cfg80211_chan_def *ch)
@@ -860,6 +777,37 @@
 	s32 err = 0;
 
 	brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+
+	/* WAR: There are a number of p2p interface related problems which
+	 * need to be handled initially (before doing the validate).
+	 * wpa_supplicant tends to do iface changes on p2p device/client/go
+	 * which are not always possible/allowed. However we need to return
+	 * OK, otherwise wpa_supplicant won't start. The situation differs
+	 * depending on configuration and setup (p2pon=1 module param). The first
+	 * check is to see if the request is a change to station for a p2p iface.
+	 */
+	if ((type == NL80211_IFTYPE_STATION) &&
+	    ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
+	     (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) ||
+	     (vif->wdev.iftype == NL80211_IFTYPE_P2P_DEVICE))) {
+		brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
+		/* Now depending on whether module param p2pon=1 was used the
+		 * response needs to be either 0 or EOPNOTSUPP. The reason is
+		 * that if p2pon=1 is used together with a newer supplicant, then
+		 * we should return an error, as this combination won't work.
+		 * In other situations 0 is returned and supplicant will start
+		 * normally. It will give a trace in cfg80211, but it is the
+		 * only way to get it working. Unfortunately this will result
+		 * in a situation where we won't support a new supplicant in
+		 * combination with module param p2pon=1, but that is the way
+		 * it is. If the user tries this, then unloading the driver might
+		 * fail/lock.
+		 */
+		if (cfg->p2p.p2pdev_dynamically)
+			return -EOPNOTSUPP;
+		else
+			return 0;
+	}
 	err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
 	if (err) {
 		brcmf_err("iface validation failed: err=%d\n", err);
@@ -875,18 +823,6 @@
 		infra = 0;
 		break;
 	case NL80211_IFTYPE_STATION:
-		/* Ignore change for p2p IF. Unclear why supplicant does this */
-		if ((vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ||
-		    (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
-			brcmf_dbg(TRACE, "Ignoring cmd for p2p if\n");
-			/* WAR: It is unexpected to get a change of VIF for P2P
-			 * IF, but it happens. The request can not be handled
-			 * but returning EPERM causes a crash. Returning 0
-			 * without setting ieee80211_ptr->iftype causes trace
-			 * (WARN_ON) but it works with wpa_supplicant
-			 */
-			return 0;
-		}
 		infra = 1;
 		break;
 	case NL80211_IFTYPE_AP:
@@ -904,7 +840,6 @@
 			err = brcmf_p2p_ifchange(cfg, BRCMF_FIL_P2P_IF_GO);
 		}
 		if (!err) {
-			set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
 			brcmf_dbg(INFO, "IF Type = AP\n");
 		}
 	} else {
@@ -2017,16 +1952,14 @@
 brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
 			    enum nl80211_tx_power_setting type, s32 mbm)
 {
-
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
 	struct net_device *ndev = cfg_to_ndev(cfg);
 	struct brcmf_if *ifp = netdev_priv(ndev);
-	u16 txpwrmw;
-	s32 err = 0;
-	s32 disable = 0;
-	s32 dbm = MBM_TO_DBM(mbm);
+	s32 err;
+	s32 disable;
+	u32 qdbm = 127;
 
-	brcmf_dbg(TRACE, "Enter\n");
+	brcmf_dbg(TRACE, "Enter %d %d\n", type, mbm);
 	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
@@ -2035,12 +1968,20 @@
 		break;
 	case NL80211_TX_POWER_LIMITED:
 	case NL80211_TX_POWER_FIXED:
-		if (dbm < 0) {
+		if (mbm < 0) {
 			brcmf_err("TX_POWER_FIXED - dbm is negative\n");
 			err = -EINVAL;
 			goto done;
 		}
+		qdbm =  MBM_TO_DBM(4 * mbm);
+		if (qdbm > 127)
+			qdbm = 127;
+		qdbm |= WL_TXPWR_OVERRIDE;
 		break;
+	default:
+		brcmf_err("Unsupported type %d\n", type);
+		err = -EINVAL;
+		goto done;
 	}
 	/* Make sure radio is off or on as far as software is concerned */
 	disable = WL_RADIO_SW_DISABLE << 16;
@@ -2048,52 +1989,44 @@
 	if (err)
 		brcmf_err("WLC_SET_RADIO error (%d)\n", err);
 
-	if (dbm > 0xffff)
-		txpwrmw = 0xffff;
-	else
-		txpwrmw = (u16) dbm;
-	err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
-				      (s32)brcmf_mw_to_qdbm(txpwrmw));
+	err = brcmf_fil_iovar_int_set(ifp, "qtxpower", qdbm);
 	if (err)
 		brcmf_err("qtxpower error (%d)\n", err);
-	cfg->conf->tx_power = dbm;
 
 done:
-	brcmf_dbg(TRACE, "Exit\n");
+	brcmf_dbg(TRACE, "Exit %d (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE);
 	return err;
 }
 
-static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy,
-				       struct wireless_dev *wdev,
-				       s32 *dbm)
+static s32
+brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
+			    s32 *dbm)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
-	struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
-	s32 txpwrdbm;
-	u8 result;
-	s32 err = 0;
+	struct net_device *ndev = cfg_to_ndev(cfg);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 qdbm = 0;
+	s32 err;
 
 	brcmf_dbg(TRACE, "Enter\n");
 	if (!check_vif_up(ifp->vif))
 		return -EIO;
 
-	err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm);
+	err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &qdbm);
 	if (err) {
 		brcmf_err("error (%d)\n", err);
 		goto done;
 	}
-
-	result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
-	*dbm = (s32) brcmf_qdbm_to_mw(result);
+	*dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4;
 
 done:
-	brcmf_dbg(TRACE, "Exit\n");
+	brcmf_dbg(TRACE, "Exit (0x%x %d)\n", qdbm, *dbm);
 	return err;
 }
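
The mW lookup table becomes unnecessary once power stays in quarter-dBm end to end: cfg80211 passes mBm (1/100 dBm) and the firmware's qtxpower iovar takes quarter-dBm, with WL_TXPWR_OVERRIDE marking a forced setting. A worked round trip:

	/* request 19 dBm: cfg80211 passes mbm = 1900
	 *   set: qdbm = MBM_TO_DBM(4 * 1900) = 7600 / 100 = 76 quarter-dBm
	 *        (clamped to 127, i.e. 31.75 dBm, then WL_TXPWR_OVERRIDE or'ed in)
	 *   get: dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4 = 76 / 4 = 19
	 */
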
 
 static s32
 brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
-			       u8 key_idx, bool unicast, bool multicast)
+				  u8 key_idx, bool unicast, bool multicast)
 {
 	struct brcmf_if *ifp = netdev_priv(ndev);
 	u32 index;
@@ -2498,6 +2431,9 @@
 	struct brcmf_sta_info_le sta_info_le;
 	u32 sta_flags;
 	u32 is_tdls_peer;
+	s32 total_rssi;
+	s32 count_rssi;
+	u32 i;
 
 	brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
 	if (!check_vif_up(ifp->vif))
@@ -2544,13 +2480,13 @@
 		sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts);
 		if (sinfo->tx_packets) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
-			sinfo->txrate.legacy = le32_to_cpu(sta_info_le.tx_rate);
-			sinfo->txrate.legacy /= 100;
+			sinfo->txrate.legacy =
+				le32_to_cpu(sta_info_le.tx_rate) / 100;
 		}
 		if (sinfo->rx_packets) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
-			sinfo->rxrate.legacy = le32_to_cpu(sta_info_le.rx_rate);
-			sinfo->rxrate.legacy /= 100;
+			sinfo->rxrate.legacy =
+				le32_to_cpu(sta_info_le.rx_rate) / 100;
 		}
 		if (le16_to_cpu(sta_info_le.ver) >= 4) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES);
@@ -2558,12 +2494,61 @@
 			sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES);
 			sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
 		}
+		total_rssi = 0;
+		count_rssi = 0;
+		for (i = 0; i < BRCMF_ANT_MAX; i++) {
+			if (sta_info_le.rssi[i]) {
+				sinfo->chain_signal_avg[count_rssi] =
+					sta_info_le.rssi[i];
+				sinfo->chain_signal[count_rssi] =
+					sta_info_le.rssi[i];
+				total_rssi += sta_info_le.rssi[i];
+				count_rssi++;
+			}
+		}
+		if (count_rssi) {
+			sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL);
+			sinfo->chains = count_rssi;
+
+			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+			total_rssi /= count_rssi;
+			sinfo->signal = total_rssi;
+		}
 	}
 done:
 	brcmf_dbg(TRACE, "Exit\n");
 	return err;
 }
 
+static int
+brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev,
+			    int idx, u8 *mac, struct station_info *sinfo)
+{
+	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct brcmf_if *ifp = netdev_priv(ndev);
+	s32 err;
+
+	brcmf_dbg(TRACE, "Enter, idx %d\n", idx);
+
+	if (idx == 0) {
+		cfg->assoclist.count = cpu_to_le32(BRCMF_MAX_ASSOCLIST);
+		err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_ASSOCLIST,
+					     &cfg->assoclist,
+					     sizeof(cfg->assoclist));
+		if (err) {
+			brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n",
+				  err);
+			cfg->assoclist.count = 0;
+			return -EOPNOTSUPP;
+		}
+	}
+	if (idx < le32_to_cpu(cfg->assoclist.count)) {
+		memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN);
+		return brcmf_cfg80211_get_station(wiphy, ndev, mac, sinfo);
+	}
+	return -ENOENT;
+}
+
 static s32
 brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
 			   bool enabled, s32 timeout)
@@ -4265,8 +4250,8 @@
 
 		brcmf_dbg(TRACE, "GO mode configuration complete\n");
 	}
-	clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 	set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, true);
 
 exit:
 	if ((err) && (!mbss)) {
@@ -4330,8 +4315,8 @@
 	}
 	brcmf_set_mpc(ifp, 1);
 	brcmf_configure_arp_offload(ifp, true);
-	set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
 	clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+	brcmf_net_setcarrier(ifp, false);
 
 	return err;
 }
@@ -4663,6 +4648,7 @@
 	.join_ibss = brcmf_cfg80211_join_ibss,
 	.leave_ibss = brcmf_cfg80211_leave_ibss,
 	.get_station = brcmf_cfg80211_get_station,
+	.dump_station = brcmf_cfg80211_dump_station,
 	.set_tx_power = brcmf_cfg80211_set_tx_power,
 	.get_tx_power = brcmf_cfg80211_get_tx_power,
 	.add_key = brcmf_cfg80211_add_key,
@@ -4747,7 +4733,8 @@
 	ifp = netdev_priv(ndev);
 	vif = ifp->vif;
 
-	brcmf_free_vif(vif);
+	if (vif)
+		brcmf_free_vif(vif);
 	free_netdev(ndev);
 }
 
@@ -4983,7 +4970,7 @@
 		brcmf_dbg(CONN, "AP mode link down\n");
 		complete(&cfg->vif_disabled);
 		if (ifp->vif->mbss)
-			brcmf_remove_interface(ifp->drvr, ifp->bssidx);
+			brcmf_remove_interface(ifp);
 		return 0;
 	}
 
@@ -5039,6 +5026,7 @@
 				&ifp->vif->sme_state);
 		} else
 			brcmf_bss_connect_done(cfg, ndev, e, true);
+		brcmf_net_setcarrier(ifp, true);
 	} else if (brcmf_is_linkdown(e)) {
 		brcmf_dbg(CONN, "Linkdown\n");
 		if (!brcmf_is_ibssmode(ifp->vif)) {
@@ -5048,6 +5036,7 @@
 		brcmf_init_prof(ndev_to_prof(ndev));
 		if (ndev != cfg_to_ndev(cfg))
 			complete(&cfg->vif_disabled);
+		brcmf_net_setcarrier(ifp, false);
 	} else if (brcmf_is_nonetwork(cfg, e)) {
 		if (brcmf_is_ibssmode(ifp->vif))
 			clear_bit(BRCMF_VIF_STATUS_CONNECTING,
@@ -6211,9 +6200,10 @@
 }
 
 struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
-						  struct device *busdev)
+						  struct device *busdev,
+						  bool p2pdev_forced)
 {
-	struct net_device *ndev = drvr->iflist[0]->ndev;
+	struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev;
 	struct brcmf_cfg80211_info *cfg;
 	struct wiphy *wiphy;
 	struct brcmf_cfg80211_vif *vif;
@@ -6302,8 +6292,19 @@
 		else
 			*cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
 	}
+	/* p2p might require that "if-events" get processed by fweh. So
+	 * activate the already registered event handlers now and activate
+	 * the rest when initialization has completed. drvr->config needs to
+	 * be assigned before activating events.
+	 */
+	drvr->config = cfg;
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
 
-	err = brcmf_p2p_attach(cfg);
+	err = brcmf_p2p_attach(cfg, p2pdev_forced);
 	if (err) {
 		brcmf_err("P2P initilisation failed (%d)\n", err);
 		goto wiphy_unreg_out;
@@ -6324,6 +6325,13 @@
 				    brcmf_notify_tdls_peer_event);
 	}
 
+	/* (re-) activate FWEH event handling */
+	err = brcmf_fweh_activate_events(ifp);
+	if (err) {
+		brcmf_err("FWEH activation failed (%d)\n", err);
+		goto wiphy_unreg_out;
+	}
+
 	return cfg;
 
 wiphy_unreg_out:
@@ -6331,6 +6339,7 @@
 priv_out:
 	wl_deinit_priv(cfg);
 	brcmf_free_vif(vif);
+	ifp->vif = NULL;
 wiphy_out:
 	brcmf_free_wiphy(wiphy);
 	return NULL;
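
Editor's note: the new dump_station callback above follows cfg80211's dump
convention: the core invokes it with idx 0, 1, 2, ... until the driver returns
-ENOENT. On idx 0 the driver snapshots the firmware association list
(BRCMF_C_GET_ASSOCLIST) into cfg->assoclist and serves later indices from that
cache. A minimal user-space model of the contract, with illustrative names and
a canned two-station list standing in for the firmware query:

	#include <stdio.h>
	#include <string.h>
	#include <errno.h>

	#define MAX_ASSOC 4

	/* Cached snapshot, refreshed when iteration starts (idx == 0). */
	static unsigned char assoclist[MAX_ASSOC][6];
	static int assoc_count;

	static int fetch_assoclist(void)  /* stands in for the firmware query */
	{
		static const unsigned char macs[2][6] = {
			{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
			{ 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb },
		};
		memcpy(assoclist, macs, sizeof(macs));
		assoc_count = 2;
		return 0;
	}

	static int dump_station(int idx, unsigned char *mac)
	{
		if (idx == 0 && fetch_assoclist())
			return -EOPNOTSUPP;  /* firmware command unsupported */
		if (idx < assoc_count) {
			memcpy(mac, assoclist[idx], 6);
			return 0;            /* caller now fills station_info */
		}
		return -ENOENT;              /* tells the caller to stop */
	}

	int main(void)
	{
		unsigned char mac[6];
		int idx;

		for (idx = 0; dump_station(idx, mac) == 0; idx++)
			printf("station %d: %02x:%02x:%02x:%02x:%02x:%02x\n",
			       idx, mac[0], mac[1], mac[2], mac[3], mac[4],
			       mac[5]);
		return 0;
	}
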
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
index d9e6d01..6a878c8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.h
@@ -143,7 +143,6 @@
  * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
  * @BRCMF_VIF_STATUS_CONNECTED: connected/joined successfully.
  * @BRCMF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress.
- * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
  * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
  */
 enum brcmf_vif_status {
@@ -151,7 +150,6 @@
 	BRCMF_VIF_STATUS_CONNECTING,
 	BRCMF_VIF_STATUS_CONNECTED,
 	BRCMF_VIF_STATUS_DISCONNECTING,
-	BRCMF_VIF_STATUS_AP_CREATING,
 	BRCMF_VIF_STATUS_AP_CREATED
 };
 
@@ -407,6 +405,7 @@
 	struct brcmu_d11inf d11inf;
 	bool wowl_enabled;
 	u32 pre_wowl_pmmode;
+	struct brcmf_assoclist_le assoclist;
 };
 
 /**
@@ -469,7 +468,8 @@
 }
 
 struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
-						  struct device *busdev);
+						  struct device *busdev,
+						  bool p2pdev_forced);
 void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
 s32 brcmf_cfg80211_up(struct net_device *ndev);
 s32 brcmf_cfg80211_down(struct net_device *ndev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index 288f831..f04833d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -101,6 +101,9 @@
 /* ARM Cortex M3 core, ID 0x82a */
 #define BCM4329_CORE_ARM_BASE		0x18002000
 
+/* Max possibly supported memory size (limited by IO mapped memory) */
+#define BRCMF_CHIP_MAX_MEMSIZE		(4 * 1024 * 1024)
+
 #define CORE_SB(base, field) \
 		(base + SBCONFIGOFF + offsetof(struct sbconfig, field))
 #define	SBCOREREV(sbidh) \
@@ -205,6 +208,7 @@
 };
 
 #define SOCRAMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
+#define SYSMEMREGOFFS(_f)	offsetof(struct sbsocramregs, _f)
 
 #define ARMCR4_CAP		(0x04)
 #define ARMCR4_BANKIDX		(0x40)
@@ -513,6 +517,9 @@
 		case BCMA_CORE_ARM_CR4:
 			cpu_found = true;
 			break;
+		case BCMA_CORE_ARM_CA7:
+			cpu_found = true;
+			break;
 		default:
 			break;
 		}
@@ -611,6 +618,29 @@
 	}
 }
 
+/** Return the SYS MEM size */
+static u32 brcmf_chip_sysmem_ramsize(struct brcmf_core_priv *sysmem)
+{
+	u32 memsize = 0;
+	u32 coreinfo;
+	u32 idx;
+	u32 nb;
+	u32 banksize;
+
+	if (!brcmf_chip_iscoreup(&sysmem->pub))
+		brcmf_chip_resetcore(&sysmem->pub, 0, 0, 0);
+
+	coreinfo = brcmf_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo));
+	nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+
+	for (idx = 0; idx < nb; idx++) {
+		brcmf_chip_socram_banksize(sysmem, idx, &banksize);
+		memsize += banksize;
+	}
+
+	return memsize;
+}
+
 /** Return the TCM-RAM size of the ARMCR4 core. */
 static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4)
 {
@@ -644,6 +674,7 @@
 		return 0x198000;
 	case BRCM_CC_4335_CHIP_ID:
 	case BRCM_CC_4339_CHIP_ID:
+	case BRCM_CC_4350_CHIP_ID:
 	case BRCM_CC_4354_CHIP_ID:
 	case BRCM_CC_4356_CHIP_ID:
 	case BRCM_CC_43567_CHIP_ID:
@@ -651,7 +682,11 @@
 	case BRCM_CC_43570_CHIP_ID:
 	case BRCM_CC_4358_CHIP_ID:
 	case BRCM_CC_43602_CHIP_ID:
+	case BRCM_CC_4371_CHIP_ID:
 		return 0x180000;
+	case BRCM_CC_4365_CHIP_ID:
+	case BRCM_CC_4366_CHIP_ID:
+		return 0x200000;
 	default:
 		brcmf_err("unknown chip: %s\n", ci->pub.name);
 		break;
@@ -674,10 +709,28 @@
 			return -EINVAL;
 		}
 	} else {
-		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM);
-		mem_core = container_of(mem, struct brcmf_core_priv, pub);
-		brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
-					  &ci->pub.srsize);
+		mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_SYS_MEM);
+		if (mem) {
+			mem_core = container_of(mem, struct brcmf_core_priv,
+						pub);
+			ci->pub.ramsize = brcmf_chip_sysmem_ramsize(mem_core);
+			ci->pub.rambase = brcmf_chip_tcm_rambase(ci);
+			if (!ci->pub.rambase) {
+				brcmf_err("RAM base not provided with ARM CA7 core\n");
+				return -EINVAL;
+			}
+		} else {
+			mem = brcmf_chip_get_core(&ci->pub,
+						  BCMA_CORE_INTERNAL_MEM);
+			if (!mem) {
+				brcmf_err("No memory cores found\n");
+				return -ENOMEM;
+			}
+			mem_core = container_of(mem, struct brcmf_core_priv,
+						pub);
+			brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize,
+						  &ci->pub.srsize);
+		}
 	}
 	brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n",
 		  ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize,
@@ -687,6 +740,12 @@
 		brcmf_err("RAM size is undetermined\n");
 		return -ENOMEM;
 	}
+
+	if (ci->pub.ramsize > BRCMF_CHIP_MAX_MEMSIZE) {
+		brcmf_err("RAM size is incorrect\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -899,13 +958,22 @@
 
 	/* assure chip is passive for core access */
 	brcmf_chip_set_passive(&ci->pub);
+
+	/* Call bus specific reset function now. Cores have been determined
+	 * but further access may require a chip specific reset at this point.
+	 */
+	if (ci->ops->reset) {
+		ci->ops->reset(ci->ctx, &ci->pub);
+		brcmf_chip_set_passive(&ci->pub);
+	}
+
 	return brcmf_chip_get_raminfo(ci);
 }
 
 static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
 {
 	struct brcmf_core *core;
-	struct brcmf_core_priv *cr4;
+	struct brcmf_core_priv *cpu;
 	u32 val;
 
 
@@ -918,10 +986,11 @@
 		brcmf_chip_coredisable(core, 0, 0);
 		break;
 	case BCMA_CORE_ARM_CR4:
-		cr4 = container_of(core, struct brcmf_core_priv, pub);
+	case BCMA_CORE_ARM_CA7:
+		cpu = container_of(core, struct brcmf_core_priv, pub);
 
 		/* clear all IOCTL bits except HALT bit */
-		val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+		val = chip->ops->read32(chip->ctx, cpu->wrapbase + BCMA_IOCTL);
 		val &= ARMCR4_BCMA_IOCTL_CPUHALT;
 		brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
 				     ARMCR4_BCMA_IOCTL_CPUHALT);
@@ -1143,6 +1212,33 @@
 	return true;
 }
 
+static inline void
+brcmf_chip_ca7_set_passive(struct brcmf_chip_priv *chip)
+{
+	struct brcmf_core *core;
+
+	brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CA7);
+
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+	brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+				   D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN,
+			     D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_ca7_set_active(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+	struct brcmf_core *core;
+
+	chip->ops->activate(chip->ctx, &chip->pub, rstvec);
+
+	/* restore ARM */
+	core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CA7);
+	brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+	return true;
+}
+
 void brcmf_chip_set_passive(struct brcmf_chip *pub)
 {
 	struct brcmf_chip_priv *chip;
@@ -1156,8 +1252,16 @@
 		brcmf_chip_cr4_set_passive(chip);
 		return;
 	}
-
-	brcmf_chip_cm3_set_passive(chip);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
+	if (arm) {
+		brcmf_chip_ca7_set_passive(chip);
+		return;
+	}
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
+	if (arm) {
+		brcmf_chip_cm3_set_passive(chip);
+		return;
+	}
 }
 
 bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec)
@@ -1171,8 +1275,14 @@
 	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
 	if (arm)
 		return brcmf_chip_cr4_set_active(chip, rstvec);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CA7);
+	if (arm)
+		return brcmf_chip_ca7_set_active(chip, rstvec);
+	arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CM3);
+	if (arm)
+		return brcmf_chip_cm3_set_active(chip);
 
-	return brcmf_chip_cm3_set_active(chip);
+	return false;
 }
 
 bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
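
Editor's note: brcmf_chip_set_passive() and brcmf_chip_set_active() above now
probe for the CPU core in a fixed order (CR4, then CA7, then CM3) and fail
cleanly, instead of assuming that anything that is not a CR4 must be a CM3. A
compact user-space model of that probe-in-order dispatch; the core IDs and the
stubbed lookup are illustrative:

	#include <stdio.h>
	#include <stdbool.h>

	enum core_id { CORE_ARM_CR4, CORE_ARM_CA7, CORE_ARM_CM3 };

	/* Stand-in for brcmf_chip_get_core(): which CPU core this chip has. */
	static enum core_id chip_cpu = CORE_ARM_CA7;

	static bool has_core(enum core_id id)
	{
		return chip_cpu == id;
	}

	static bool set_active(void)
	{
		/* Probe known CPU cores in priority order; first match wins. */
		if (has_core(CORE_ARM_CR4)) {
			printf("CR4 path\n");
			return true;
		}
		if (has_core(CORE_ARM_CA7)) {
			printf("CA7 path\n");
			return true;
		}
		if (has_core(CORE_ARM_CM3)) {
			printf("CM3 path\n");
			return true;
		}
		return false;  /* unknown chip: report failure, don't guess */
	}

	int main(void)
	{
		return set_active() ? 0 : 1;
	}
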
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
index 60dcb38..f6b5fee 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -73,6 +73,7 @@
 	u32 (*read32)(void *ctx, u32 addr);
 	void (*write32)(void *ctx, u32 addr, u32 value);
 	int (*prepare)(void *ctx);
+	int (*reset)(void *ctx, struct brcmf_chip *chip);
 	int (*setup)(void *ctx, struct brcmf_chip *chip);
 	void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
 };
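
Editor's note: the ops table gains an optional reset hook; the chip code calls
it only when the bus registered one, and re-enters set_passive afterwards. The
optional-callback pattern in isolation, as a user-space sketch (struct and
function names are illustrative, not the driver's):

	#include <stdio.h>

	struct bus_ops {
		int (*prepare)(void *ctx);
		int (*reset)(void *ctx);	/* optional: may be NULL */
	};

	static int pcie_reset(void *ctx)
	{
		printf("bus-specific watchdog reset\n");
		return 0;
	}

	static void chip_setup(const struct bus_ops *ops, void *ctx)
	{
		if (ops->prepare)
			ops->prepare(ctx);
		/* The optional hook runs only when the bus provided one. */
		if (ops->reset)
			ops->reset(ctx);
	}

	int main(void)
	{
		struct bus_ops pcie = { .prepare = NULL, .reset = pcie_reset };
		struct bus_ops sdio = { .prepare = NULL, .reset = NULL };

		chip_setup(&pcie, NULL);	/* resets */
		chip_setup(&sdio, NULL);	/* silently skips the hook */
		return 0;
	}
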
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/common.h b/drivers/net/wireless/brcm80211/brcmfmac/common.h
index 0d39d80..21c7488 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/common.h
@@ -17,4 +17,7 @@
 
 extern const u8 ALLFFMAC[ETH_ALEN];
 
+/* Sets dongle media info (drv_version, mac address). */
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+
 #endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index fe9d3fb..b5ab98e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -33,6 +33,7 @@
 #include "feature.h"
 #include "proto.h"
 #include "pcie.h"
+#include "common.h"
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
@@ -53,6 +54,8 @@
 #define BRCMF_RXREORDER_EXPIDX_VALID		0x08
 #define BRCMF_RXREORDER_NEW_HOLE		0x10
 
+#define BRCMF_BSSIDX_INVALID			-1
+
 /* Error bits */
 int brcmf_msg_level;
 module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@@ -60,10 +63,8 @@
 
 /* P2P0 enable */
 static int brcmf_p2p_enable;
-#ifdef CONFIG_BRCMDBG
 module_param_named(p2pon, brcmf_p2p_enable, int, 0);
-MODULE_PARM_DESC(p2pon, "enable p2p management functionality");
-#endif
+MODULE_PARM_DESC(p2pon, "enable legacy p2p management functionality");
 
 char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
 {
@@ -83,6 +84,24 @@
 	return "<if_none>";
 }
 
+struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx)
+{
+	struct brcmf_if *ifp;
+	s32 bssidx;
+
+	if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
+		brcmf_err("ifidx %d out of range\n", ifidx);
+		return NULL;
+	}
+
+	ifp = NULL;
+	bssidx = drvr->if2bss[ifidx];
+	if (bssidx >= 0)
+		ifp = drvr->iflist[bssidx];
+
+	return ifp;
+}
+
 static void _brcmf_set_multicast_list(struct work_struct *work)
 {
 	struct brcmf_if *ifp;
@@ -520,17 +539,15 @@
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
 	struct brcmf_skb_reorder_data *rd;
-	u8 ifidx;
 	int ret;
 
 	brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
 	/* process and remove protocol-specific header */
-	ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
-	ifp = drvr->iflist[ifidx];
+	ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
 
 	if (ret || !ifp || !ifp->ndev) {
-		if ((ret != -ENODATA) && ifp)
+		if (ret != -ENODATA && ifp)
 			ifp->stats.rx_errors++;
 		brcmu_pkt_buf_free_skb(skb);
 		return;
@@ -543,17 +560,11 @@
 		brcmf_netif_rx(ifp, skb);
 }
 
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
-		      bool success)
+void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
 {
-	struct brcmf_if *ifp;
 	struct ethhdr *eh;
 	u16 type;
 
-	ifp = drvr->iflist[ifidx];
-	if (!ifp)
-		goto done;
-
 	eh = (struct ethhdr *)(txp->data);
 	type = ntohs(eh->h_proto);
 
@@ -565,7 +576,7 @@
 
 	if (!success)
 		ifp->stats.tx_errors++;
-done:
+
 	brcmu_pkt_buf_free_skb(txp);
 }
 
@@ -573,17 +584,17 @@
 {
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
 	struct brcmf_pub *drvr = bus_if->drvr;
-	u8 ifidx;
+	struct brcmf_if *ifp;
 
 	/* await txstatus signal for firmware if active */
 	if (brcmf_fws_fc_active(drvr->fws)) {
 		if (!success)
 			brcmf_fws_bustxfail(drvr->fws, txp);
 	} else {
-		if (brcmf_proto_hdrpull(drvr, false, &ifidx, txp))
+		if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
 			brcmu_pkt_buf_free_skb(txp);
 		else
-			brcmf_txfinalize(drvr, txp, ifidx, success);
+			brcmf_txfinalize(ifp, txp, success);
 	}
 }
 
@@ -624,8 +635,7 @@
 
 	brcmf_cfg80211_down(ndev);
 
-	/* Set state and stop OS transmissions */
-	netif_stop_queue(ndev);
+	brcmf_net_setcarrier(ifp, false);
 
 	return 0;
 }
@@ -659,8 +669,8 @@
 		return -EIO;
 	}
 
-	/* Allow transmit calls */
-	netif_start_queue(ndev);
+	/* Clear carrier; it is set when connected or in AP mode. */
+	netif_carrier_off(ndev);
 	return 0;
 }
 
@@ -708,8 +718,6 @@
 	}
 
 	brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
-
-	ndev->destructor = brcmf_cfg80211_free_netdev;
 	return 0;
 
 fail:
@@ -719,6 +727,32 @@
 	return -EBADE;
 }
 
+static void brcmf_net_detach(struct net_device *ndev)
+{
+	if (ndev->reg_state == NETREG_REGISTERED)
+		unregister_netdev(ndev);
+	else
+		brcmf_cfg80211_free_netdev(ndev);
+}
+
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on)
+{
+	struct net_device *ndev;
+
+	brcmf_dbg(TRACE, "Enter, idx=%d carrier=%d\n", ifp->bssidx, on);
+
+	ndev = ifp->ndev;
+	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_DISCONNECTED, !on);
+	if (on) {
+		if (!netif_carrier_ok(ndev))
+			netif_carrier_on(ndev);
+
+	} else {
+		if (netif_carrier_ok(ndev))
+			netif_carrier_off(ndev);
+	}
+}
+
 static int brcmf_net_p2p_open(struct net_device *ndev)
 {
 	brcmf_dbg(TRACE, "Enter\n");
@@ -778,7 +812,7 @@
 }
 
 struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
-			      char *name, u8 *mac_addr)
+			      bool is_p2pdev, char *name, u8 *mac_addr)
 {
 	struct brcmf_if *ifp;
 	struct net_device *ndev;
@@ -795,8 +829,7 @@
 			  ifp->ndev->name);
 		if (ifidx) {
 			netif_stop_queue(ifp->ndev);
-			unregister_netdev(ifp->ndev);
-			free_netdev(ifp->ndev);
+			brcmf_net_detach(ifp->ndev);
 			drvr->iflist[bssidx] = NULL;
 		} else {
 			brcmf_err("ignore IF event\n");
@@ -804,7 +837,7 @@
 		}
 	}
 
-	if (!brcmf_p2p_enable && bssidx == 1) {
+	if (!brcmf_p2p_enable && is_p2pdev) {
 		/* this is P2P_DEVICE interface */
 		brcmf_dbg(INFO, "allocate non-netdev interface\n");
 		ifp = kzalloc(sizeof(*ifp), GFP_KERNEL);
@@ -813,13 +846,17 @@
 	} else {
 		brcmf_dbg(INFO, "allocate netdev interface\n");
 		/* Allocate netdev, including space for private structure */
-		ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
-				    ether_setup);
+		ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name,
+				    NET_NAME_UNKNOWN, ether_setup);
 		if (!ndev)
 			return ERR_PTR(-ENOMEM);
 
+		ndev->destructor = brcmf_cfg80211_free_netdev;
 		ifp = netdev_priv(ndev);
 		ifp->ndev = ndev;
+		/* store mapping ifidx to bssidx */
+		if (drvr->if2bss[ifidx] == BRCMF_BSSIDX_INVALID)
+			drvr->if2bss[ifidx] = bssidx;
 	}
 
 	ifp->drvr = drvr;
@@ -850,6 +887,8 @@
 		return;
 	}
 	brcmf_dbg(TRACE, "Enter, idx=%d, ifidx=%d\n", bssidx, ifp->ifidx);
+	if (drvr->if2bss[ifp->ifidx] == bssidx)
+		drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
 	if (ifp->ndev) {
 		if (bssidx == 0) {
 			if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@@ -865,17 +904,28 @@
 			cancel_work_sync(&ifp->setmacaddr_work);
 			cancel_work_sync(&ifp->multicast_work);
 		}
-		/* unregister will take care of freeing it */
-		unregister_netdev(ifp->ndev);
+		brcmf_net_detach(ifp->ndev);
+	} else {
+		/* Only p2p device interfaces which get dynamically created
+		 * end up here. In this case the p2p module should be informed
+		 * about the removal of the interface within the firmware. If
+		 * not, subsequent p2p commands towards the firmware will cause
+		 * serious side effects. The p2p module will clean
+		 * up the ifp if needed.
+		 */
+		brcmf_p2p_ifp_removed(ifp);
+		kfree(ifp);
 	}
 }
 
-void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx)
+void brcmf_remove_interface(struct brcmf_if *ifp)
 {
-	if (drvr->iflist[bssidx]) {
-		brcmf_fws_del_interface(drvr->iflist[bssidx]);
-		brcmf_del_if(drvr, bssidx);
-	}
+	if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bssidx] != ifp))
+		return;
+	brcmf_dbg(TRACE, "Enter, bssidx=%d, ifidx=%d\n", ifp->bssidx,
+		  ifp->ifidx);
+	brcmf_fws_del_interface(ifp);
+	brcmf_del_if(ifp->drvr, ifp->bssidx);
 }
 
 int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr)
@@ -906,6 +956,7 @@
 {
 	struct brcmf_pub *drvr = NULL;
 	int ret = 0;
+	int i;
 
 	brcmf_dbg(TRACE, "Enter\n");
 
@@ -914,6 +965,9 @@
 	if (!drvr)
 		return -ENOMEM;
 
+	for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++)
+		drvr->if2bss[i] = BRCMF_BSSIDX_INVALID;
+
 	mutex_init(&drvr->proto_block);
 
 	/* Link to bus module */
@@ -921,8 +975,8 @@
 	drvr->bus_if = dev_get_drvdata(dev);
 	drvr->bus_if->drvr = drvr;
 
-	/* create device debugfs folder */
-	brcmf_debugfs_attach(drvr);
+	/* attach debug facilities */
+	brcmf_debug_attach(drvr);
 
 	/* Attach and link in the protocol */
 	ret = brcmf_proto_attach(drvr);
@@ -981,16 +1035,11 @@
 	brcmf_dbg(TRACE, "\n");
 
 	/* add primary networking interface */
-	ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
+	ifp = brcmf_add_if(drvr, 0, 0, false, "wlan%d", NULL);
 	if (IS_ERR(ifp))
 		return PTR_ERR(ifp);
 
-	if (brcmf_p2p_enable)
-		p2p_ifp = brcmf_add_if(drvr, 1, 0, "p2p%d", NULL);
-	else
-		p2p_ifp = NULL;
-	if (IS_ERR(p2p_ifp))
-		p2p_ifp = NULL;
+	p2p_ifp = NULL;
 
 	/* signal bus ready */
 	brcmf_bus_change_state(bus_if, BRCMF_BUS_UP);
@@ -1017,39 +1066,37 @@
 
 	brcmf_fws_add_interface(ifp);
 
-	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
+	drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev,
+					     brcmf_p2p_enable);
 	if (drvr->config == NULL) {
 		ret = -ENOMEM;
 		goto fail;
 	}
 
-	ret = brcmf_fweh_activate_events(ifp);
-	if (ret < 0)
-		goto fail;
-
 	ret = brcmf_net_attach(ifp, false);
+
+	if ((!ret) && (brcmf_p2p_enable)) {
+		p2p_ifp = drvr->iflist[1];
+		if (p2p_ifp)
+			ret = brcmf_net_p2p_attach(p2p_ifp);
+	}
 fail:
 	if (ret < 0) {
 		brcmf_err("failed: %d\n", ret);
-		brcmf_cfg80211_detach(drvr->config);
+		if (drvr->config) {
+			brcmf_cfg80211_detach(drvr->config);
+			drvr->config = NULL;
+		}
 		if (drvr->fws) {
 			brcmf_fws_del_interface(ifp);
 			brcmf_fws_deinit(drvr);
 		}
-		if (drvr->iflist[0]) {
-			free_netdev(ifp->ndev);
-			drvr->iflist[0] = NULL;
-		}
-		if (p2p_ifp) {
-			free_netdev(p2p_ifp->ndev);
-			drvr->iflist[1] = NULL;
-		}
+		if (ifp)
+			brcmf_net_detach(ifp->ndev);
+		if (p2p_ifp)
+			brcmf_net_detach(p2p_ifp->ndev);
 		return ret;
 	}
-	if ((brcmf_p2p_enable) && (p2p_ifp))
-		if (brcmf_net_p2p_attach(p2p_ifp) < 0)
-			brcmf_p2p_enable = 0;
-
 	return 0;
 }
 
@@ -1105,7 +1152,7 @@
 
 	/* make sure primary interface removed last */
 	for (i = BRCMF_MAX_IFS-1; i > -1; i--)
-		brcmf_remove_interface(drvr, i);
+		brcmf_remove_interface(drvr->iflist[i]);
 
 	brcmf_cfg80211_detach(drvr->config);
 
@@ -1115,7 +1162,7 @@
 
 	brcmf_proto_detach(drvr);
 
-	brcmf_debugfs_detach(drvr);
+	brcmf_debug_detach(drvr);
 	bus_if->drvr = NULL;
 	kfree(drvr);
 }
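
Editor's note: throughout core.c, direct drvr->iflist[ifidx] lookups give way
to brcmf_get_ifp(), which dereferences the new if2bss[] table because a
firmware interface index no longer maps 1:1 onto a BSS index. The indirection
in a standalone model (array sizes and names are illustrative):

	#include <stdio.h>

	#define MAX_IFS 16
	#define BSSIDX_INVALID (-1)

	static void *iflist[MAX_IFS];	/* keyed by bssidx */
	static int if2bss[MAX_IFS];	/* firmware ifidx -> bssidx, or invalid */

	static void *get_ifp(int ifidx)
	{
		int bssidx;

		if (ifidx < 0 || ifidx >= MAX_IFS)
			return NULL;	/* out-of-range index from firmware */
		bssidx = if2bss[ifidx];
		if (bssidx < 0)
			return NULL;	/* no interface bound to this ifidx */
		return iflist[bssidx];
	}

	int main(void)
	{
		static int dummy_if;
		int i;

		for (i = 0; i < MAX_IFS; i++)
			if2bss[i] = BSSIDX_INVALID;  /* mirror attach-time init */

		iflist[2] = &dummy_if;	/* interface lives at bssidx 2 ... */
		if2bss[1] = 2;		/* ... firmware addresses it as ifidx 1 */

		printf("ifidx 1 -> %p\n", get_ifp(1));
		printf("ifidx 3 -> %p\n", get_ifp(3));	/* unmapped: NULL */
		return 0;
	}
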
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index 7463041..2f9101b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -122,6 +122,7 @@
 	struct mac_address addresses[BRCMF_MAX_IFS];
 
 	struct brcmf_if *iflist[BRCMF_MAX_IFS];
+	s32 if2bss[BRCMF_MAX_IFS];
 
 	struct mutex proto_block;
 	unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
@@ -153,10 +154,13 @@
  *	netif stopped due to firmware signalling flow control.
  * @BRCMF_NETIF_STOP_REASON_FLOW:
  *	netif stopped due to flowring full.
+ * @BRCMF_NETIF_STOP_REASON_DISCONNECTED:
+ *	netif stopped due to not being connected (STA mode).
  */
 enum brcmf_netif_stop_reason {
-	BRCMF_NETIF_STOP_REASON_FWS_FC = 1,
-	BRCMF_NETIF_STOP_REASON_FLOW = 2
+	BRCMF_NETIF_STOP_REASON_FWS_FC = BIT(0),
+	BRCMF_NETIF_STOP_REASON_FLOW = BIT(1),
+	BRCMF_NETIF_STOP_REASON_DISCONNECTED = BIT(2)
 };
 
 /**
@@ -202,19 +206,16 @@
 
 /* Return pointer to interface name */
 char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-
+struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx);
 int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
 struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
-			      char *name, u8 *mac_addr);
-void brcmf_remove_interface(struct brcmf_pub *drvr, u32 bssidx);
+			      bool is_p2pdev, char *name, u8 *mac_addr);
+void brcmf_remove_interface(struct brcmf_if *ifp);
 int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
-void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp, u8 ifidx,
-		      bool success);
+void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
-
-/* Sets dongle media info (drv_version, mac address). */
-int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
 
 #endif /* BRCMFMAC_CORE_H */
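
Editor's note: the stop reasons above become BIT() flags so that independent
conditions (firmware flow control, a full flowring, being disconnected) can
each hold the netif queue, and the queue may only run again once every reason
has been cleared. A sketch of that accumulate-and-clear logic as a user-space
model:

	#include <stdio.h>

	#define STOP_FWS_FC        (1u << 0)
	#define STOP_FLOW          (1u << 1)
	#define STOP_DISCONNECTED  (1u << 2)

	static unsigned int stop_reasons;

	static void txflowblock(unsigned int reason, int blocked)
	{
		if (blocked)
			stop_reasons |= reason;
		else
			stop_reasons &= ~reason;

		/* Queue runs only when no reason at all remains set. */
		printf("queue %s (reasons=0x%x)\n",
		       stop_reasons ? "stopped" : "running", stop_reasons);
	}

	int main(void)
	{
		txflowblock(STOP_DISCONNECTED, 1); /* carrier off: stopped */
		txflowblock(STOP_FLOW, 1);         /* ring full too: stopped */
		txflowblock(STOP_DISCONNECTED, 0); /* reconnected: ring still full */
		txflowblock(STOP_FLOW, 0);         /* drained: running again */
		return 0;
	}
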
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
index 2d6d005..1299dcc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.c
@@ -16,15 +16,45 @@
 #include <linux/debugfs.h>
 #include <linux/netdevice.h>
 #include <linux/module.h>
+#include <linux/devcoredump.h>
 
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "core.h"
 #include "bus.h"
+#include "fweh.h"
 #include "debug.h"
 
 static struct dentry *root_folder;
 
+static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
+				      size_t len)
+{
+	void *dump;
+	size_t ramsize;
+
+	ramsize = brcmf_bus_get_ramsize(bus);
+	if (ramsize) {
+		dump = vzalloc(len + ramsize);
+		if (!dump)
+			return -ENOMEM;
+		memcpy(dump, data, len);
+		brcmf_bus_get_memdump(bus, dump + len, ramsize);
+		dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+	}
+	return 0;
+}
+
+static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
+					   const struct brcmf_event_msg *evtmsg,
+					   void *data)
+{
+	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
+
+	return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+					  evtmsg->datalen);
+}
+
 void brcmf_debugfs_init(void)
 {
 	root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
@@ -41,7 +71,7 @@
 	root_folder = NULL;
 }
 
-int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
 	struct device *dev = drvr->bus_if->dev;
 
@@ -49,12 +79,18 @@
 		return -ENODEV;
 
 	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
+	if (IS_ERR(drvr->dbgfs_dir))
+		return PTR_ERR(drvr->dbgfs_dir);
 
-	return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
+
+	return brcmf_fweh_register(drvr, BRCMF_E_PSM_WATCHDOG,
+				   brcmf_debug_psm_watchdog_notify);
 }
 
-void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
+	brcmf_fweh_unregister(drvr, BRCMF_E_PSM_WATCHDOG);
+
 	if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
 		debugfs_remove_recursive(drvr->dbgfs_dir);
 }
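
Editor's note: on a PSM watchdog event the driver now captures a firmware
memory dump through the devcoredump facility: one buffer holds the event
payload followed by the RAM image, and dev_coredumpv() takes ownership of the
vmalloc'd buffer and exposes it to user space. A kernel-style sketch of the
shape of that call; my_bus_read_ram() is a hypothetical stand-in for the bus
dump helper, not a real API:

	/* Sketch only; mirrors the shape of brcmf_debug_create_memdump(). */
	#include <linux/device.h>
	#include <linux/devcoredump.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/string.h>
	#include <linux/vmalloc.h>

	void my_bus_read_ram(void *dst, size_t len);	/* hypothetical */

	static int create_memdump(struct device *dev, const void *evt,
				  size_t evt_len, size_t ramsize)
	{
		void *dump;

		dump = vzalloc(evt_len + ramsize);  /* event data + RAM image */
		if (!dump)
			return -ENOMEM;

		memcpy(dump, evt, evt_len);
		my_bus_read_ram(dump + evt_len, ramsize);

		/* devcoredump takes ownership of 'dump' and frees it later. */
		dev_coredumpv(dev, dump, evt_len + ramsize, GFP_KERNEL);
		return 0;
	}
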
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
index eb0b8c4..d0d9676 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/debug.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/debug.h
@@ -37,6 +37,7 @@
 #define BRCMF_SDIO_VAL		0x00020000
 #define BRCMF_MSGBUF_VAL	0x00040000
 #define BRCMF_PCIE_VAL		0x00080000
+#define BRCMF_FWCON_VAL		0x00100000
 
 /* set default print format */
 #undef pr_fmt
@@ -78,6 +79,7 @@
 #define BRCMF_GLOM_ON()		(brcmf_msg_level & BRCMF_GLOM_VAL)
 #define BRCMF_EVENT_ON()	(brcmf_msg_level & BRCMF_EVENT_VAL)
 #define BRCMF_FIL_ON()		(brcmf_msg_level & BRCMF_FIL_VAL)
+#define BRCMF_FWCON_ON()	(brcmf_msg_level & BRCMF_FWCON_VAL)
 
 #else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
@@ -90,6 +92,7 @@
 #define BRCMF_GLOM_ON()		0
 #define BRCMF_EVENT_ON()	0
 #define BRCMF_FIL_ON()		0
+#define BRCMF_FWCON_ON()	0
 
 #endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
 
@@ -106,8 +109,8 @@
 #ifdef DEBUG
 void brcmf_debugfs_init(void);
 void brcmf_debugfs_exit(void);
-int brcmf_debugfs_attach(struct brcmf_pub *drvr);
-void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+int brcmf_debug_attach(struct brcmf_pub *drvr);
+void brcmf_debug_detach(struct brcmf_pub *drvr);
 struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
 int brcmf_debugfs_add_entry(struct brcmf_pub *drvr, const char *fn,
 			    int (*read_fn)(struct seq_file *seq, void *data));
@@ -118,11 +121,11 @@
 static inline void brcmf_debugfs_exit(void)
 {
 }
-static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+static inline int brcmf_debug_attach(struct brcmf_pub *drvr)
 {
 	return 0;
 }
-static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+static inline void brcmf_debug_detach(struct brcmf_pub *drvr)
 {
 }
 static inline
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
index 1e94e94..44bb306 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/feature.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <linux/module.h>
 
 #include <brcm_hw_ids.h>
 #include "core.h"
@@ -23,6 +24,12 @@
 #include "fwil.h"
 #include "feature.h"
 
+
+/* Module param feature_disable (global for all devices) */
+static int brcmf_feature_disable;
+module_param_named(feature_disable, brcmf_feature_disable, int, 0);
+MODULE_PARM_DESC(feature_disable, "Disable features");
+
 /*
  * expand feature list to array of feature strings.
  */
@@ -121,7 +128,7 @@
 
 void brcmf_feat_attach(struct brcmf_pub *drvr)
 {
-	struct brcmf_if *ifp = drvr->iflist[0];
+	struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
 
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
@@ -131,6 +138,12 @@
 		brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
 	brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_P2P, "p2p");
 
+	if (brcmf_feature_disable) {
+		brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
+			  ifp->drvr->feat_flags, brcmf_feature_disable);
+		ifp->drvr->feat_flags &= ~brcmf_feature_disable;
+	}
+
 	/* set chip related quirks */
 	switch (drvr->bus_if->chip) {
 	case BRCM_CC_43236_CHIP_ID:
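
Editor's note: feature_disable is a plain bitmask that is ANDed out of the
probed feature flags, so individual firmware features can be switched off at
module load time. The masking in miniature; the flag values here are
illustrative, not the driver's enum:

	#include <stdio.h>

	#define FEAT_MCHAN (1u << 1)
	#define FEAT_PNO   (1u << 2)
	#define FEAT_P2P   (1u << 5)

	int main(void)
	{
		unsigned int feat_flags = FEAT_MCHAN | FEAT_PNO | FEAT_P2P;
		unsigned int feature_disable = FEAT_PNO; /* from module param */

		feat_flags &= ~feature_disable;          /* user override wins */
		printf("effective features: 0x%x\n", feat_flags);
		return 0;
	}
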
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 971920f..4248f3c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -29,7 +29,7 @@
 #define BRCMF_FW_NVRAM_PCIEDEV_LEN		10	/* pcie/1/4/ + \0 */
 
 char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
-module_param_string(firmware_path, brcmf_firmware_path,
+module_param_string(alternative_fw_path, brcmf_firmware_path,
 		    BRCMF_FW_PATH_LEN, 0440);
 
 enum nvram_parser_state {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 8d1ab4a..2ca783f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -221,7 +221,7 @@
 
 	bus_if = dev_get_drvdata(flow->dev);
 	drvr = bus_if->drvr;
-	ifp = drvr->iflist[ifidx];
+	ifp = brcmf_get_ifp(drvr, ifidx);
 	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);
 
 	spin_unlock_irqrestore(&flow->block_lock, flags);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
index 5551861..95fd1c9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
@@ -34,7 +34,7 @@
 };
 
 struct brcmf_flowring_ring {
-	u8 hash_id;
+	u16 hash_id;
 	bool blocked;
 	enum ring_status status;
 	struct sk_buff_head skblist;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index ec62492..3878b6f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -179,25 +179,28 @@
 {
 	struct brcmf_if_event *ifevent = data;
 	struct brcmf_if *ifp;
+	bool is_p2pdev;
 	int err = 0;
 
 	brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u role: %u\n",
 		  ifevent->action, ifevent->ifidx, ifevent->bssidx,
 		  ifevent->flags, ifevent->role);
 
-	/* The P2P Device interface event must not be ignored
-	 * contrary to what firmware tells us. The only way to
-	 * distinguish the P2P Device is by looking at the ifidx
-	 * and bssidx received.
+	/* The P2P Device interface event must not be ignored contrary to what
+	 * firmware tells us. Older firmware uses p2p noif, with sta role.
+	 * This should be accepted when p2pdev_setup is ongoing. TDLS setup will
+	 * use the same ifevent and should be ignored.
 	 */
-	if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
-	    (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
+	is_p2pdev = ((ifevent->flags & BRCMF_E_IF_FLAG_NOIF) &&
+		     (ifevent->role == BRCMF_E_IF_ROLE_P2P_CLIENT ||
+		      ((ifevent->role == BRCMF_E_IF_ROLE_STA) &&
+		       (drvr->fweh.p2pdev_setup_ongoing))));
+	if (!is_p2pdev && (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
 		brcmf_dbg(EVENT, "event can be ignored\n");
 		return;
 	}
 	if (ifevent->ifidx >= BRCMF_MAX_IFS) {
-		brcmf_err("invalid interface index: %u\n",
-			  ifevent->ifidx);
+		brcmf_err("invalid interface index: %u\n", ifevent->ifidx);
 		return;
 	}
 
@@ -207,10 +210,11 @@
 		brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
 			  emsg->addr);
 		ifp = brcmf_add_if(drvr, ifevent->bssidx, ifevent->ifidx,
-				   emsg->ifname, emsg->addr);
+				   is_p2pdev, emsg->ifname, emsg->addr);
 		if (IS_ERR(ifp))
 			return;
-		brcmf_fws_add_interface(ifp);
+		if (!is_p2pdev)
+			brcmf_fws_add_interface(ifp);
 		if (!drvr->fweh.evt_handler[BRCMF_E_IF])
 			if (brcmf_net_attach(ifp, false) < 0)
 				return;
@@ -222,7 +226,7 @@
 	err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
 	if (ifp && ifevent->action == BRCMF_E_IF_DEL)
-		brcmf_remove_interface(drvr, ifevent->bssidx);
+		brcmf_remove_interface(ifp);
 }
 
 /**
@@ -297,8 +301,7 @@
 			goto event_free;
 		}
 
-		if ((event->code == BRCMF_E_TDLS_PEER_EVENT) &&
-		    (emsg.bsscfgidx == 1))
+		if (event->code == BRCMF_E_TDLS_PEER_EVENT)
 			ifp = drvr->iflist[0];
 		else
 			ifp = drvr->iflist[emsg.bsscfgidx];
@@ -315,6 +318,17 @@
 }
 
 /**
+ * brcmf_fweh_p2pdev_setup() - P2P device setup ongoing (or not).
+ *
+ * @ifp: ifp on which setup is taking place or finished.
+ * @ongoing: p2p device setup in progress (or not).
+ */
+void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing)
+{
+	ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing;
+}
+
+/**
  * brcmf_fweh_attach() - initialize firmware event handling.
  *
  * @drvr: driver information object.
@@ -335,7 +349,7 @@
 void brcmf_fweh_detach(struct brcmf_pub *drvr)
 {
 	struct brcmf_fweh_info *fweh = &drvr->fweh;
-	struct brcmf_if *ifp = drvr->iflist[0];
+	struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
 	s8 eventmask[BRCMF_EVENTING_MASK_LEN];
 
 	if (ifp) {
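
Editor's note: since the P2P device can no longer be recognized by its fixed
(ifidx 0, bssidx 1) pair, a p2pdev_setup_ongoing flag brackets the firmware
request: p2p.c raises it before issuing p2p_disc, the event handler consults
it to accept an otherwise-ignorable NOIF/STA interface event, and it is
lowered again once setup completes or fails. A minimal model of that
bracketing (names illustrative):

	#include <stdio.h>
	#include <stdbool.h>

	static bool p2pdev_setup_ongoing;

	/* Event side: would this NOIF interface event be accepted? */
	static bool accept_if_event(bool noif, bool role_p2p_client,
				    bool role_sta)
	{
		bool is_p2pdev = noif &&
				 (role_p2p_client ||
				  (role_sta && p2pdev_setup_ongoing));
		return is_p2pdev || !noif;
	}

	int main(void)
	{
		/* Command side: bracket the firmware request with the flag. */
		p2pdev_setup_ongoing = true;
		printf("sta/noif during setup: %d\n",
		       accept_if_event(true, false, true));	/* accepted */
		p2pdev_setup_ongoing = false;
		printf("sta/noif after setup:  %d\n",
		       accept_if_event(true, false, true));	/* ignored */
		return 0;
	}
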
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index 1326898..d9a9428 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -230,12 +230,14 @@
 /**
  * struct brcmf_fweh_info - firmware event handling information.
  *
+ * @p2pdev_setup_ongoing: P2P device creation in progress.
  * @event_work: event worker.
  * @evt_q_lock: lock for event queue protection.
  * @event_q: event queue.
  * @evt_handler: registered event handlers.
  */
 struct brcmf_fweh_info {
+	bool p2pdev_setup_ongoing;
 	struct work_struct event_work;
 	spinlock_t evt_q_lock;
 	struct list_head event_q;
@@ -255,6 +257,7 @@
 int brcmf_fweh_activate_events(struct brcmf_if *ifp);
 void brcmf_fweh_process_event(struct brcmf_pub *drvr,
 			      struct brcmf_event *event_packet);
+void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
 
 static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
 					  struct sk_buff *skb)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 5434dcf..b20fc0f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -72,6 +72,7 @@
 #define BRCMF_C_GET_BSS_INFO			136
 #define BRCMF_C_GET_BANDLIST			140
 #define BRCMF_C_SET_SCB_TIMEOUT			158
+#define BRCMF_C_GET_ASSOCLIST			159
 #define BRCMF_C_GET_PHYLIST			180
 #define BRCMF_C_SET_SCAN_CHANNEL_TIME		185
 #define BRCMF_C_SET_SCAN_UNASSOC_TIME		187
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 297911f..daa427b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -119,6 +119,8 @@
 #define BRCMF_COUNTRY_BUF_SZ		4
 #define BRCMF_ANT_MAX			4
 
+#define BRCMF_MAX_ASSOCLIST		128
+
 /* join preference types for join_pref iovar */
 enum brcmf_join_pref_types {
 	BRCMF_JOIN_PREF_RSSI = 1,
@@ -621,4 +623,15 @@
 	__le32 nvramrev;
 };
 
+/**
+ * struct brcmf_assoclist_le - request assoc list.
+ *
+ * @count: indicates number of stations.
+ * @mac: MAC addresses of stations.
+ */
+struct brcmf_assoclist_le {
+	__le32 count;
+	u8 mac[BRCMF_MAX_ASSOCLIST][ETH_ALEN];
+};
+
 #endif /* FWIL_TYPES_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index 5017eaa..086cac3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -972,7 +972,7 @@
 brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
 			     u8 if_id)
 {
-	struct brcmf_if *ifp = fws->drvr->iflist[!if_id ? 0 : if_id + 1];
+	struct brcmf_if *ifp = brcmf_get_ifp(fws->drvr, if_id);
 
 	if (WARN_ON(!ifp))
 		return;
@@ -1398,7 +1398,7 @@
 }
 
 static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
-					 struct sk_buff *skb, u8 ifidx,
+					 struct sk_buff *skb,
 					 u32 genbit, u16 seq)
 {
 	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
@@ -1448,7 +1448,7 @@
 	struct sk_buff *skb;
 	struct brcmf_skbuff_cb *skcb;
 	struct brcmf_fws_mac_descriptor *entry = NULL;
-	u8 ifidx;
+	struct brcmf_if *ifp;
 
 	brcmf_dbg(DATA, "flags %d\n", flags);
 
@@ -1497,15 +1497,16 @@
 	}
 	brcmf_fws_macdesc_return_req_credit(skb);
 
-	if (brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb)) {
+	ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
+	if (ret) {
 		brcmu_pkt_buf_free_skb(skb);
 		return -EINVAL;
 	}
 	if (!remove_from_hanger)
-		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, ifidx,
+		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
 						    genbit, seq);
 	if (remove_from_hanger || ret)
-		brcmf_txfinalize(fws->drvr, skb, ifidx, true);
+		brcmf_txfinalize(ifp, skb, true);
 
 	return 0;
 }
@@ -1615,11 +1616,10 @@
 	return 0;
 }
 
-int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
-		      struct sk_buff *skb)
+void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 {
 	struct brcmf_skb_reorder_data *rd;
-	struct brcmf_fws_info *fws = drvr->fws;
+	struct brcmf_fws_info *fws = ifp->drvr->fws;
 	u8 *signal_data;
 	s16 data_len;
 	u8 type;
@@ -1629,20 +1629,20 @@
 	s32 err;
 
 	brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
-		  ifidx, skb->len, signal_len);
+		  ifp->ifidx, skb->len, siglen);
 
-	WARN_ON(signal_len > skb->len);
+	WARN_ON(siglen > skb->len);
 
-	if (!signal_len)
-		return 0;
+	if (!siglen)
+		return;
 	/* if flow control disabled, skip to packet data and leave */
 	if ((!fws) || (!fws->fw_signals)) {
-		skb_pull(skb, signal_len);
-		return 0;
+		skb_pull(skb, siglen);
+		return;
 	}
 
 	fws->stats.header_pulls++;
-	data_len = signal_len;
+	data_len = siglen;
 	signal_data = skb->data;
 
 	status = BRCMF_FWS_RET_OK_NOSCHEDULE;
@@ -1730,14 +1730,12 @@
 	/* signalling processing result does
 	 * not affect the actual ethernet packet.
 	 */
-	skb_pull(skb, signal_len);
+	skb_pull(skb, siglen);
 
 	/* this may be a signal-only packet
 	 */
 	if (skb->len == 0)
 		fws->stats.header_only_pkt++;
-
-	return 0;
 }
 
 static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
@@ -1848,7 +1846,7 @@
 		entry->transit_count--;
 		if (entry->suppressed)
 			entry->suppr_transit_count--;
-		brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
+		(void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL);
 		goto rollback;
 	}
 
@@ -1904,7 +1902,7 @@
 	if (fws->avoid_queueing) {
 		rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
 		if (rc < 0)
-			brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+			brcmf_txfinalize(ifp, skb, false);
 		return rc;
 	}
 
@@ -1928,7 +1926,7 @@
 		brcmf_fws_schedule_deq(fws);
 	} else {
 		brcmf_err("drop skb: no hanger slot\n");
-		brcmf_txfinalize(drvr, skb, ifp->ifidx, false);
+		brcmf_txfinalize(ifp, skb, false);
 		rc = -ENOMEM;
 	}
 	brcmf_fws_unlock(fws);
@@ -2008,8 +2006,9 @@
 				ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
 				brcmf_fws_lock(fws);
 				if (ret < 0)
-					brcmf_txfinalize(drvr, skb, ifidx,
-							 false);
+					brcmf_txfinalize(brcmf_get_ifp(drvr,
+								       ifidx),
+							 skb, false);
 				if (fws->bus_flow_blocked)
 					break;
 			}
@@ -2117,6 +2116,7 @@
 int brcmf_fws_init(struct brcmf_pub *drvr)
 {
 	struct brcmf_fws_info *fws;
+	struct brcmf_if *ifp;
 	u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
 	int rc;
 	u32 mode;
@@ -2176,21 +2176,22 @@
 	 * continue. Set mode back to none indicating not enabled.
 	 */
 	fws->fw_signals = true;
-	if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
+	ifp = brcmf_get_ifp(drvr, 0);
+	if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) {
 		brcmf_err("failed to set bdcv2 tlv signaling\n");
 		fws->fcmode = BRCMF_FWS_FCMODE_NONE;
 		fws->fw_signals = false;
 	}
 
-	if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
+	if (brcmf_fil_iovar_int_set(ifp, "ampdu_hostreorder", 1))
 		brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
 
 	/* Enable seq number reuse, if supported */
-	if (brcmf_fil_iovar_int_get(drvr->iflist[0], "wlfc_mode", &mode) == 0) {
+	if (brcmf_fil_iovar_int_get(ifp, "wlfc_mode", &mode) == 0) {
 		if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
 			mode = 0;
 			BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
-			if (brcmf_fil_iovar_int_set(drvr->iflist[0],
+			if (brcmf_fil_iovar_int_set(ifp,
 						    "wlfc_mode", mode) == 0) {
 				BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
 			}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
index 9fc8609..a36bac1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -21,8 +21,7 @@
 int brcmf_fws_init(struct brcmf_pub *drvr);
 void brcmf_fws_deinit(struct brcmf_pub *drvr);
 bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
-int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
-		      struct sk_buff *skb);
+void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb);
 int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb);
 
 void brcmf_fws_reset_interface(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 7b2136c..44e618f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -522,7 +522,7 @@
 
 
 static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
-				u8 *ifidx, struct sk_buff *skb)
+				struct sk_buff *skb, struct brcmf_if **ifp)
 {
 	return -ENODEV;
 }
@@ -873,7 +873,8 @@
 	commonring = msgbuf->flowrings[flowid];
 	atomic_dec(&commonring->outstanding_tx);
 
-	brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
+	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
+			 skb, true);
 }
 
 
@@ -1081,15 +1082,7 @@
 {
 	struct brcmf_if *ifp;
 
-	/* The ifidx is the idx to map to matching netdev/ifp. When receiving
-	 * events this is easy because it contains the bssidx which maps
-	 * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
-	 * bssidx 1 is used for p2p0 and no data can be received or
-	 * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
-	 */
-	if (ifidx)
-		(ifidx)++;
-	ifp = msgbuf->drvr->iflist[ifidx];
+	ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
 	if (!ifp || !ifp->ndev) {
 		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
 		brcmu_pkt_buf_free_skb(skb);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index a9ba775..d224b3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -2084,11 +2084,13 @@
 	brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
 
 	brcmf_cfg80211_arm_vif_event(p2p->cfg, p2p_vif);
+	brcmf_fweh_p2pdev_setup(pri_ifp, true);
 
 	/* Initialize P2P Discovery in the firmware */
 	err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
 	if (err < 0) {
 		brcmf_err("set p2p_disc error\n");
+		brcmf_fweh_p2pdev_setup(pri_ifp, false);
 		brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
 		goto fail;
 	}
@@ -2097,6 +2099,7 @@
 	err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD,
 						    msecs_to_jiffies(1500));
 	brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL);
+	brcmf_fweh_p2pdev_setup(pri_ifp, false);
 	if (!err) {
 		brcmf_err("timeout occurred\n");
 		err = -EIO;
@@ -2131,20 +2134,6 @@
 }
 
 /**
- * brcmf_p2p_delete_p2pdev() - delete P2P_DEVICE virtual interface.
- *
- * @vif: virtual interface object to delete.
- */
-static void brcmf_p2p_delete_p2pdev(struct brcmf_p2p_info *p2p,
-				    struct brcmf_cfg80211_vif *vif)
-{
-	cfg80211_unregister_wdev(&vif->wdev);
-	p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-	brcmf_remove_interface(vif->ifp->drvr, vif->ifp->bssidx);
-	brcmf_free_vif(vif);
-}
-
-/**
  * brcmf_p2p_add_vif() - create a new P2P virtual interface.
  *
  * @wiphy: wiphy device of new interface.
@@ -2255,6 +2244,7 @@
 	brcmf_dbg(TRACE, "delete P2P vif\n");
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 
+	brcmf_cfg80211_arm_vif_event(cfg, vif);
 	switch (vif->wdev.iftype) {
 	case NL80211_IFTYPE_P2P_CLIENT:
 		if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
@@ -2267,10 +2257,10 @@
 		break;
 
 	case NL80211_IFTYPE_P2P_DEVICE:
+		if (!p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif)
+			return 0;
 		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
 		brcmf_p2p_deinit_discovery(p2p);
-		brcmf_p2p_delete_p2pdev(p2p, vif);
-		return 0;
 	default:
 		return -ENOTSUPP;
 	}
@@ -2282,10 +2272,11 @@
 		wait_for_completion_timeout(&cfg->vif_disabled,
 					    msecs_to_jiffies(500));
 
-	brcmf_vif_clear_mgmt_ies(vif);
-
-	brcmf_cfg80211_arm_vif_event(cfg, vif);
-	err = brcmf_p2p_release_p2p_if(vif);
+	err = 0;
+	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+		brcmf_vif_clear_mgmt_ies(vif);
+		err = brcmf_p2p_release_p2p_if(vif);
+	}
 	if (!err) {
 		/* wait for firmware event */
 		err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL,
@@ -2295,12 +2286,31 @@
 		else
 			err = 0;
 	}
+	if (err)
+		brcmf_remove_interface(vif->ifp);
+
 	brcmf_cfg80211_arm_vif_event(cfg, NULL);
-	p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+	if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+		p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
 
 	return err;
 }
 
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+{
+	struct brcmf_cfg80211_info *cfg;
+	struct brcmf_cfg80211_vif *vif;
+
+	brcmf_dbg(INFO, "P2P: device interface removed\n");
+	vif = ifp->vif;
+	cfg = wdev_to_cfg(&vif->wdev);
+	cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+	rtnl_lock();
+	cfg80211_unregister_wdev(&vif->wdev);
+	rtnl_unlock();
+	brcmf_free_vif(vif);
+}
+
 int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
 	struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
@@ -2324,87 +2334,49 @@
 	struct brcmf_cfg80211_vif *vif;
 
 	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
-	mutex_lock(&cfg->usr_sync);
-	(void)brcmf_p2p_deinit_discovery(p2p);
-	brcmf_abort_scanning(cfg);
-	clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
-	mutex_unlock(&cfg->usr_sync);
+	/* This call can be the result of the unregister_wdev call. In that
+	 * case we don't want to do anything anymore. Just return. The config
+	 * vif will have been cleared at this point.
+	 */
+	if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif == vif) {
+		mutex_lock(&cfg->usr_sync);
+		/* Set the discovery state to SCAN */
+		(void)brcmf_p2p_set_discover_state(vif->ifp,
+						   WL_P2P_DISC_ST_SCAN, 0, 0);
+		brcmf_abort_scanning(cfg);
+		clear_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state);
+		mutex_unlock(&cfg->usr_sync);
+	}
 }
 
 /**
  * brcmf_p2p_attach() - attach for P2P.
  *
  * @cfg: driver private data for cfg80211 interface.
+ * @p2pdev_forced: create p2p device interface at attach.
  */
-s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg)
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
 {
-	struct brcmf_if *pri_ifp;
-	struct brcmf_if *p2p_ifp;
-	struct brcmf_cfg80211_vif *p2p_vif;
 	struct brcmf_p2p_info *p2p;
-	struct brcmf_pub *drvr;
-	s32 bssidx;
+	struct brcmf_if *pri_ifp;
 	s32 err = 0;
+	void *err_ptr;
 
 	p2p = &cfg->p2p;
 	p2p->cfg = cfg;
 
-	drvr = cfg->pub;
-
-	pri_ifp = drvr->iflist[0];
-	p2p_ifp = drvr->iflist[1];
-
+	pri_ifp = brcmf_get_ifp(cfg->pub, 0);
 	p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;
 
-	if (p2p_ifp) {
-		p2p_vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_P2P_DEVICE,
-					  false);
-		if (IS_ERR(p2p_vif)) {
-			brcmf_err("could not create discovery vif\n");
-			err = -ENOMEM;
-			goto exit;
+	if (p2pdev_forced) {
+		err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
+		if (IS_ERR(err_ptr)) {
+			brcmf_err("P2P device creation failed.\n");
+			err = PTR_ERR(err_ptr);
 		}
-
-		p2p_vif->ifp = p2p_ifp;
-		p2p_ifp->vif = p2p_vif;
-		p2p_vif->wdev.netdev = p2p_ifp->ndev;
-		p2p_ifp->ndev->ieee80211_ptr = &p2p_vif->wdev;
-		SET_NETDEV_DEV(p2p_ifp->ndev, wiphy_dev(cfg->wiphy));
-
-		p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif;
-
-		brcmf_p2p_generate_bss_mac(p2p, NULL);
-		memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN);
-		brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr);
-
-		/* Initialize P2P Discovery in the firmware */
-		err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1);
-		if (err < 0) {
-			brcmf_err("set p2p_disc error\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* obtain bsscfg index for P2P discovery */
-		err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bssidx);
-		if (err < 0) {
-			brcmf_err("retrieving discover bsscfg index failed\n");
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-		/* Verify that firmware uses same bssidx as driver !! */
-		if (p2p_ifp->bssidx != bssidx) {
-			brcmf_err("Incorrect bssidx=%d, compared to p2p_ifp->bssidx=%d\n",
-				  bssidx, p2p_ifp->bssidx);
-			brcmf_free_vif(p2p_vif);
-			goto exit;
-		}
-
-		init_completion(&p2p->send_af_done);
-		INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
-		init_completion(&p2p->afx_hdl.act_frm_scan);
-		init_completion(&p2p->wait_next_af);
+	} else {
+		p2p->p2pdev_dynamically = true;
 	}
-exit:
 	return err;
 }
 
@@ -2421,10 +2393,7 @@
 	if (vif != NULL) {
 		brcmf_p2p_cancel_remain_on_channel(vif->ifp);
 		brcmf_p2p_deinit_discovery(p2p);
-		/* remove discovery interface */
-		rtnl_lock();
-		brcmf_p2p_delete_p2pdev(p2p, vif);
-		rtnl_unlock();
+		brcmf_remove_interface(vif->ifp);
 	}
 	/* just set it all to zero */
 	memset(p2p, 0, sizeof(*p2p));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
index 872f382..5d49059 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.h
@@ -124,6 +124,7 @@
  * @wait_next_af: thread synchronizing struct.
  * @gon_req_action: about to send go negotiation request frame.
  * @block_gon_req_tx: drop tx go negotiation request frame.
+ * @p2pdev_dynamically: p2p device interface created by supplicant, not module param.
  */
 struct brcmf_p2p_info {
 	struct brcmf_cfg80211_info *cfg;
@@ -144,9 +145,10 @@
 	struct completion wait_next_af;
 	bool gon_req_action;
 	bool block_gon_req_tx;
+	bool p2pdev_dynamically;
 };
 
-s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg);
+s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced);
 void brcmf_p2p_detach(struct brcmf_p2p_info *p2p);
 struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
 				       unsigned char name_assign_type,
@@ -155,6 +157,7 @@
 int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
 		       enum brcmf_fil_p2p_if_types if_type);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
 int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
 int brcmf_p2p_scan_prep(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 3a98c43..83d8042 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -47,12 +47,20 @@
 
 #define BRCMF_PCIE_43602_FW_NAME		"brcm/brcmfmac43602-pcie.bin"
 #define BRCMF_PCIE_43602_NVRAM_NAME		"brcm/brcmfmac43602-pcie.txt"
+#define BRCMF_PCIE_4350_FW_NAME			"brcm/brcmfmac4350-pcie.bin"
+#define BRCMF_PCIE_4350_NVRAM_NAME		"brcm/brcmfmac4350-pcie.txt"
 #define BRCMF_PCIE_4356_FW_NAME			"brcm/brcmfmac4356-pcie.bin"
 #define BRCMF_PCIE_4356_NVRAM_NAME		"brcm/brcmfmac4356-pcie.txt"
 #define BRCMF_PCIE_43570_FW_NAME		"brcm/brcmfmac43570-pcie.bin"
 #define BRCMF_PCIE_43570_NVRAM_NAME		"brcm/brcmfmac43570-pcie.txt"
 #define BRCMF_PCIE_4358_FW_NAME			"brcm/brcmfmac4358-pcie.bin"
 #define BRCMF_PCIE_4358_NVRAM_NAME		"brcm/brcmfmac4358-pcie.txt"
+#define BRCMF_PCIE_4365_FW_NAME			"brcm/brcmfmac4365b-pcie.bin"
+#define BRCMF_PCIE_4365_NVRAM_NAME		"brcm/brcmfmac4365b-pcie.txt"
+#define BRCMF_PCIE_4366_FW_NAME			"brcm/brcmfmac4366b-pcie.bin"
+#define BRCMF_PCIE_4366_NVRAM_NAME		"brcm/brcmfmac4366b-pcie.txt"
+#define BRCMF_PCIE_4371_FW_NAME			"brcm/brcmfmac4371-pcie.bin"
+#define BRCMF_PCIE_4371_NVRAM_NAME		"brcm/brcmfmac4371-pcie.txt"
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT		2000 /* msec */
 
@@ -74,6 +82,8 @@
 #define BRCMF_PCIE_REG_INTMASK			0x94
 #define BRCMF_PCIE_REG_SBMBX			0x98
 
+#define BRCMF_PCIE_REG_LINK_STATUS_CTRL		0xBC
+
 #define BRCMF_PCIE_PCIE2REG_INTMASK		0x24
 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT		0x48
 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK		0x4C
@@ -192,12 +202,20 @@
 
 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4350_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4350_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4365_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4365_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4366_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4366_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4371_NVRAM_NAME);
 
 
 struct brcmf_pcie_console {
@@ -434,6 +452,47 @@
 }
 
 
+static void
+brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+			  void *dstaddr, u32 len)
+{
+	void __iomem *address = devinfo->tcm + mem_offset;
+	__le32 *dst32;
+	__le16 *dst16;
+	u8 *dst8;
+
+	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
+		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
+			dst8 = (u8 *)dstaddr;
+			while (len) {
+				*dst8 = ioread8(address);
+				address++;
+				dst8++;
+				len--;
+			}
+		} else {
+			len = len / 2;
+			dst16 = (__le16 *)dstaddr;
+			while (len) {
+				*dst16 = cpu_to_le16(ioread16(address));
+				address += 2;
+				dst16++;
+				len--;
+			}
+		}
+	} else {
+		len = len / 4;
+		dst32 = (__le32 *)dstaddr;
+		while (len) {
+			*dst32 = cpu_to_le32(ioread32(address));
+			address += 4;
+			dst32++;
+			len--;
+		}
+	}
+}
+
+
 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
 		CHIPCREGOFFS(reg), value)
 
@@ -466,6 +525,7 @@
 
 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
 {
+	struct brcmf_core *core;
 	u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
 			     BRCMF_PCIE_CFGREG_PM_CSR,
 			     BRCMF_PCIE_CFGREG_MSI_CAP,
@@ -484,32 +544,38 @@
 	if (!devinfo->ci)
 		return;
 
+	/* Disable ASPM */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
-			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
-	lsc = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+	pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			      &lsc);
 	val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, val);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			       val);
 
+	/* Watchdog reset */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
 	WRITECC32(devinfo, watchdog, 4);
 	msleep(100);
 
+	/* Restore ASPM */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
-			       BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL);
-	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, lsc);
+	pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
+			       lsc);
 
-	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
-		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR,
-				       cfg_offset[i]);
-		val = brcmf_pcie_read_reg32(devinfo,
-					    BRCMF_PCIE_PCIE2REG_CONFIGDATA);
-		brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
-			  cfg_offset[i], val);
-		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA,
-				       val);
+	core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+	if (core->rev <= 13) {
+		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
+			brcmf_pcie_write_reg32(devinfo,
+					       BRCMF_PCIE_PCIE2REG_CONFIGADDR,
+					       cfg_offset[i]);
+			val = brcmf_pcie_read_reg32(devinfo,
+				BRCMF_PCIE_PCIE2REG_CONFIGDATA);
+			brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
+				  cfg_offset[i], val);
+			brcmf_pcie_write_reg32(devinfo,
+					       BRCMF_PCIE_PCIE2REG_CONFIGDATA,
+					       val);
+		}
 	}
 }
 
@@ -519,8 +585,6 @@
 	u32 config;
 
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
-	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0)
-		brcmf_pcie_reset_device(devinfo);
 	/* BAR1 window may not be sized properly */
 	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
 	brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
@@ -644,7 +708,7 @@
 	addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
 	console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
 
-	brcmf_dbg(PCIE, "Console: base %x, buf %x, size %d\n",
+	brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
 		  console->base_addr, console->buf_addr, console->bufsize);
 }
 
@@ -656,6 +720,9 @@
 	u8 ch;
 	u32 newidx;
 
+	if (!BRCMF_FWCON_ON())
+		return;
+
 	console = &devinfo->shared.console;
 	addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
 	newidx = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -677,7 +744,7 @@
 		}
 		if (ch == '\n') {
 			console->log_str[console->log_idx] = 0;
-			brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str);
+			pr_debug("CONSOLE: %s", console->log_str);
 			console->log_idx = 0;
 		}
 	}
@@ -1330,12 +1397,36 @@
 }
 
 
+static size_t brcmf_pcie_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	return devinfo->ci->ramsize - devinfo->ci->srsize;
+}
+
+
+static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+	struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+
+	brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
+	brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
+	return 0;
+}
+
+
 static struct brcmf_bus_ops brcmf_pcie_bus_ops = {
 	.txdata = brcmf_pcie_tx,
 	.stop = brcmf_pcie_down,
 	.txctl = brcmf_pcie_tx_ctlpkt,
 	.rxctl = brcmf_pcie_rx_ctlpkt,
 	.wowl_config = brcmf_pcie_wowl_config,
+	.get_ramsize = brcmf_pcie_get_ramsize,
+	.get_memdump = brcmf_pcie_get_memdump,
 };
 
 
@@ -1408,6 +1499,10 @@
 		fw_name = BRCMF_PCIE_43602_FW_NAME;
 		nvram_name = BRCMF_PCIE_43602_NVRAM_NAME;
 		break;
+	case BRCM_CC_4350_CHIP_ID:
+		fw_name = BRCMF_PCIE_4350_FW_NAME;
+		nvram_name = BRCMF_PCIE_4350_NVRAM_NAME;
+		break;
 	case BRCM_CC_4356_CHIP_ID:
 		fw_name = BRCMF_PCIE_4356_FW_NAME;
 		nvram_name = BRCMF_PCIE_4356_NVRAM_NAME;
@@ -1422,6 +1517,18 @@
 		fw_name = BRCMF_PCIE_4358_FW_NAME;
 		nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
 		break;
+	case BRCM_CC_4365_CHIP_ID:
+		fw_name = BRCMF_PCIE_4365_FW_NAME;
+		nvram_name = BRCMF_PCIE_4365_NVRAM_NAME;
+		break;
+	case BRCM_CC_4366_CHIP_ID:
+		fw_name = BRCMF_PCIE_4366_FW_NAME;
+		nvram_name = BRCMF_PCIE_4366_NVRAM_NAME;
+		break;
+	case BRCM_CC_4371_CHIP_ID:
+		fw_name = BRCMF_PCIE_4371_FW_NAME;
+		nvram_name = BRCMF_PCIE_4371_NVRAM_NAME;
+		break;
 	default:
 		brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
 		return -ENODEV;
@@ -1633,6 +1740,23 @@
 }
 
 
+static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
+{
+	struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
+	u32 val;
+
+	devinfo->ci = chip;
+	brcmf_pcie_reset_device(devinfo);
+
+	val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
+	if (val != 0xffffffff)
+		brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
+				       val);
+
+	return 0;
+}
+
+
 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
 					u32 rstvec)
 {
@@ -1644,6 +1768,7 @@
 
 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
 	.prepare = brcmf_pcie_buscoreprep,
+	.reset = brcmf_pcie_buscore_reset,
 	.activate = brcmf_pcie_buscore_activate,
 	.read32 = brcmf_pcie_buscore_read32,
 	.write32 = brcmf_pcie_buscore_write32,
@@ -1811,7 +1936,6 @@
 		brcmf_pcie_intr_disable(devinfo);
 
 	brcmf_detach(&pdev->dev);
-	brcmf_pcie_reset_device(devinfo);
 
 	kfree(bus->bus_priv.pcie);
 	kfree(bus->msgbuf->flowrings);
@@ -1929,6 +2053,7 @@
 	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
 
 static struct pci_device_id brcmf_pcie_devid_table[] = {
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
@@ -1937,6 +2062,13 @@
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
 	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
 	{ /* end: all zeroes */ }
 };
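
A minimal userspace sketch of the width-selection idea behind brcmf_pcie_copy_dev_tomem() in the pcie.c hunk above: fall back to narrower accesses whenever the source, the destination, or the length would make wider ones unsuitable. Plain memory reads stand in for ioread8/16/32(), and modulo tests stand in for the driver's bit tests; all names here are illustrative, not driver API.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void copy_width_aware(const uint8_t *src, uint8_t *dst, size_t len)
{
	if (((uintptr_t)src % 4) == 0 && ((uintptr_t)dst % 4) == 0 &&
	    (len % 4) == 0) {
		for (size_t i = 0; i < len; i += 4) {
			uint32_t v;
			memcpy(&v, src + i, 4);	/* 32-bit wide read */
			memcpy(dst + i, &v, 4);
		}
	} else if (((uintptr_t)src % 2) == 0 && ((uintptr_t)dst % 2) == 0 &&
		   (len % 2) == 0) {
		for (size_t i = 0; i < len; i += 2) {
			uint16_t v;
			memcpy(&v, src + i, 2);	/* 16-bit wide read */
			memcpy(dst + i, &v, 2);
		}
	} else {
		for (size_t i = 0; i < len; i++)	/* byte-wide fallback */
			dst[i] = src[i];
	}
}

int main(void)
{
	uint8_t src[12] = "hello world", dst[12] = { 0 };

	copy_width_aware(src, dst, sizeof(src));
	printf("%s\n", dst);	/* prints "hello world" on every path */
	return 0;
}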
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
index 971172f..d55119d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/proto.h
@@ -24,8 +24,8 @@
 
 
 struct brcmf_proto {
-	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-		       struct sk_buff *skb);
+	int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
+		       struct sk_buff *skb, struct brcmf_if **ifp);
 	int (*query_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd,
 			  void *buf, uint len);
 	int (*set_dcmd)(struct brcmf_pub *drvr, int ifidx, uint cmd, void *buf,
@@ -46,9 +46,19 @@
 void brcmf_proto_detach(struct brcmf_pub *drvr);
 
 static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
-				      u8 *ifidx, struct sk_buff *skb)
+				      struct sk_buff *skb,
+				      struct brcmf_if **ifp)
 {
-	return drvr->proto->hdrpull(drvr, do_fws, ifidx, skb);
+	struct brcmf_if *tmp = NULL;
+
+	/* ensure the protocol layer is always called with a
+	 * non-NULL, initialized pointer.
+	 */
+	if (ifp)
+		*ifp = NULL;
+	else
+		ifp = &tmp;
+	return drvr->proto->hdrpull(drvr, do_fws, skb, ifp);
 }
 static inline int brcmf_proto_query_dcmd(struct brcmf_pub *drvr, int ifidx,
 					 uint cmd, void *buf, uint len)
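
The brcmf_proto_hdrpull() wrapper above guarantees that protocol implementations always receive a valid, NULL-initialized out-pointer, even when the caller passes NULL because it does not care about the result. A self-contained sketch of that guard pattern, using hypothetical names (lookup, lookup_wrapper):

#include <stdio.h>

static int lookup(int key, int **out)
{
	static int table[4] = { 10, 20, 30, 40 };

	*out = &table[key & 3];	/* callee may dereference unconditionally */
	return 0;
}

static int lookup_wrapper(int key, int **out)
{
	int *tmp = NULL;

	if (out)
		*out = NULL;	/* caller sees NULL until the callee fills it */
	else
		out = &tmp;	/* callee still gets a valid pointer */
	return lookup(key, out);
}

int main(void)
{
	int *val;

	lookup_wrapper(2, &val);
	printf("%d\n", *val);		/* 30 */
	lookup_wrapper(1, NULL);	/* safe: result discarded via tmp */
	return 0;
}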
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index f990e3d..7e74ac3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/printk.h>
@@ -123,6 +124,7 @@
 
 #define BRCMF_FIRSTREAD	(1 << 6)
 
+#define BRCMF_CONSOLE	10	/* watchdog interval to poll console */
 
 /* SBSDIO_DEVICE_CTL */
 
@@ -3204,6 +3206,8 @@
 	if (IS_ERR_OR_NULL(dentry))
 		return;
 
+	bus->console_interval = BRCMF_CONSOLE;
+
 	brcmf_debugfs_add_entry(drvr, "forensics", brcmf_sdio_forensic_read);
 	brcmf_debugfs_add_entry(drvr, "counters",
 				brcmf_debugfs_sdio_count_read);
@@ -3535,6 +3539,51 @@
 	return err;
 }
 
+static size_t brcmf_sdio_bus_get_ramsize(struct device *dev)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+
+	return bus->ci->ramsize - bus->ci->srsize;
+}
+
+static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
+				      size_t mem_size)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+	struct brcmf_sdio *bus = sdiodev->bus;
+	int err;
+	int address;
+	int offset;
+	int len;
+
+	brcmf_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase,
+		  mem_size);
+
+	address = bus->ci->rambase;
+	offset = err = 0;
+	sdio_claim_host(sdiodev->func[1]);
+	while (offset < mem_size) {
+		len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+		      mem_size - offset;
+		err = brcmf_sdiod_ramrw(sdiodev, false, address, data, len);
+		if (err) {
+			brcmf_err("error %d on reading %d membytes at 0x%08x\n",
+				  err, len, address);
+			goto done;
+		}
+		data += len;
+		offset += len;
+		address += len;
+	}
+
+done:
+	sdio_release_host(sdiodev->func[1]);
+	return err;
+}
+
 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
 {
 	if (!bus->dpc_triggered) {
@@ -3613,7 +3662,7 @@
 	}
 #ifdef DEBUG
 	/* Poll for console output periodically */
-	if (bus->sdiodev->state == BRCMF_SDIOD_DATA &&
+	if (bus->sdiodev->state == BRCMF_SDIOD_DATA && BRCMF_FWCON_ON() &&
 	    bus->console_interval != 0) {
 		bus->console.count += BRCMF_WD_POLL_MS;
 		if (bus->console.count >= bus->console_interval) {
@@ -3983,7 +4032,9 @@
 	.txctl = brcmf_sdio_bus_txctl,
 	.rxctl = brcmf_sdio_bus_rxctl,
 	.gettxq = brcmf_sdio_bus_gettxq,
-	.wowl_config = brcmf_sdio_wowl_config
+	.wowl_config = brcmf_sdio_wowl_config,
+	.get_ramsize = brcmf_sdio_bus_get_ramsize,
+	.get_memdump = brcmf_sdio_bus_get_memdump,
 };
 
 static void brcmf_sdio_firmware_callback(struct device *dev,
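
brcmf_sdio_bus_get_memdump() above reads device RAM in MEMBLOCK-sized chunks, clamping the final chunk to the remaining length. A hedged userspace analogue, where fread() from a temporary file stands in for brcmf_sdiod_ramrw() and BLOCK is an illustrative stand-in for MEMBLOCK:

#include <stdio.h>

#define BLOCK 64	/* illustrative stand-in for MEMBLOCK */

static int dump_region(FILE *dev, char *data, size_t mem_size)
{
	size_t offset = 0, len;

	while (offset < mem_size) {
		len = (offset + BLOCK < mem_size) ? BLOCK : mem_size - offset;
		if (fread(data + offset, 1, len, dev) != len)
			return -1;	/* bail out on a short read */
		offset += len;
	}
	return 0;
}

int main(void)
{
	FILE *dev = tmpfile();	/* fake 200-byte "device RAM" image */
	char out[200];

	if (!dev)
		return 1;
	fseek(dev, (long)sizeof(out) - 1, SEEK_SET);
	fputc(0, dev);		/* extend the file to 200 bytes */
	rewind(dev);

	return dump_region(dev, out, sizeof(out)) ? 1 : 0;
}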
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index daba86d..689e64d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -144,6 +144,7 @@
 
 	struct usb_device *usbdev;
 	struct device *dev;
+	struct mutex dev_init_lock;
 
 	int ctl_in_pipe, ctl_out_pipe;
 	struct urb *ctl_urb; /* URB for control endpoint */
@@ -1204,6 +1205,8 @@
 	int ret;
 
 	brcmf_dbg(USB, "Start fw downloading\n");
+
+	devinfo = bus->bus_priv.usb->devinfo;
 	ret = check_file(fw->data);
 	if (ret < 0) {
 		brcmf_err("invalid firmware\n");
@@ -1211,7 +1214,6 @@
 		goto error;
 	}
 
-	devinfo = bus->bus_priv.usb->devinfo;
 	devinfo->image = fw->data;
 	devinfo->image_len = fw->size;
 
@@ -1224,9 +1226,11 @@
 	if (ret)
 		goto error;
 
+	mutex_unlock(&devinfo->dev_init_lock);
 	return;
 error:
 	brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
+	mutex_unlock(&devinfo->dev_init_lock);
 	device_release_driver(dev);
 }
 
@@ -1264,6 +1268,7 @@
 		if (ret)
 			goto fail;
 		/* we are done */
+		mutex_unlock(&devinfo->dev_init_lock);
 		return 0;
 	}
 	bus->chip = bus_pub->devid;
@@ -1317,6 +1322,12 @@
 
 	devinfo->usbdev = usb;
 	devinfo->dev = &usb->dev;
+	/* Take an init lock, to protect against disconnect while still
+	 * loading. Necessary because the firmware load is asynchronous.
+	 */
+	mutex_init(&devinfo->dev_init_lock);
+	mutex_lock(&devinfo->dev_init_lock);
+
 	usb_set_intfdata(intf, devinfo);
 
 	/* Check that the device supports only one configuration */
@@ -1391,6 +1402,7 @@
 	return 0;
 
 fail:
+	mutex_unlock(&devinfo->dev_init_lock);
 	kfree(devinfo);
 	usb_set_intfdata(intf, NULL);
 	return ret;
@@ -1403,8 +1415,19 @@
 
 	brcmf_dbg(USB, "Enter\n");
 	devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
-	brcmf_usb_disconnect_cb(devinfo);
-	kfree(devinfo);
+
+	if (devinfo) {
+		mutex_lock(&devinfo->dev_init_lock);
+		/* Make sure that devinfo still exists. Firmware probe routines
+		 * may have released the device and cleared the intfdata.
+		 */
+		if (!usb_get_intfdata(intf))
+			goto done;
+
+		brcmf_usb_disconnect_cb(devinfo);
+		kfree(devinfo);
+	}
+done:
 	brcmf_dbg(USB, "Exit\n");
 }
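
The dev_init_lock added above serializes disconnect against the asynchronous firmware download: probe takes the lock, the firmware callback releases it once the device is usable, and disconnect blocks on it so teardown can never overtake initialization. A POSIX-threads sketch of the same ordering (build with -pthread); a semaphore stands in for the kernel mutex because a plain pthread mutex may not be unlocked by a thread other than its locker.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t dev_init_done;
static int dev_ready;

static void *fw_download(void *arg)
{
	(void)arg;
	sleep(1);			/* asynchronous firmware download... */
	dev_ready = 1;
	sem_post(&dev_init_done);	/* init finished: allow teardown */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&dev_init_done, 0, 0);	/* "probe": init in progress */
	pthread_create(&t, NULL, fw_download, NULL);

	sem_wait(&dev_init_done);	/* "disconnect": must wait for init */
	printf("teardown sees dev_ready=%d\n", dev_ready);	/* always 1 */
	pthread_join(t, NULL);
	sem_destroy(&dev_init_done);
	return 0;
}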
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index d2c5747..bec2dc1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -820,7 +820,7 @@
 		    struct ieee80211_vif *vif,
 		    enum ieee80211_ampdu_mlme_action action,
 		    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		    u8 buf_size)
+		    u8 buf_size, bool amsdu)
 {
 	struct brcms_info *wl = hw->priv;
 	struct scb *scb = &wl->wlc->pri_scb;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 7a6daa3..aa06ea2 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -39,6 +39,7 @@
 #define BRCM_CC_4339_CHIP_ID		0x4339
 #define BRCM_CC_43430_CHIP_ID		43430
 #define BRCM_CC_4345_CHIP_ID		0x4345
+#define BRCM_CC_4350_CHIP_ID		0x4350
 #define BRCM_CC_4354_CHIP_ID		0x4354
 #define BRCM_CC_4356_CHIP_ID		0x4356
 #define BRCM_CC_43566_CHIP_ID		43566
@@ -47,6 +48,9 @@
 #define BRCM_CC_43570_CHIP_ID		43570
 #define BRCM_CC_4358_CHIP_ID		0x4358
 #define BRCM_CC_43602_CHIP_ID		43602
+#define BRCM_CC_4365_CHIP_ID		0x4365
+#define BRCM_CC_4366_CHIP_ID		0x4366
+#define BRCM_CC_4371_CHIP_ID		0x4371
 
 /* USB Device IDs */
 #define BRCM_USB_43143_DEVICE_ID	0xbd1e
@@ -56,6 +60,7 @@
 #define BRCM_USB_BCMFW_DEVICE_ID	0x0bdc
 
 /* PCIE Device IDs */
+#define BRCM_PCIE_4350_DEVICE_ID	0x43a3
 #define BRCM_PCIE_4354_DEVICE_ID	0x43df
 #define BRCM_PCIE_4356_DEVICE_ID	0x43ec
 #define BRCM_PCIE_43567_DEVICE_ID	0x43d3
@@ -65,6 +70,14 @@
 #define BRCM_PCIE_43602_2G_DEVICE_ID	0x43bb
 #define BRCM_PCIE_43602_5G_DEVICE_ID	0x43bc
 #define BRCM_PCIE_43602_RAW_DEVICE_ID	43602
+#define BRCM_PCIE_4365_DEVICE_ID	0x43ca
+#define BRCM_PCIE_4365_2G_DEVICE_ID	0x43cb
+#define BRCM_PCIE_4365_5G_DEVICE_ID	0x43cc
+#define BRCM_PCIE_4366_DEVICE_ID	0x43c3
+#define BRCM_PCIE_4366_2G_DEVICE_ID	0x43c4
+#define BRCM_PCIE_4366_5G_DEVICE_ID	0x43c5
+#define BRCM_PCIE_4371_DEVICE_ID	0x440d
+
 
 /* brcmsmac IDs */
 #define BCM4313_D11N2G_ID	0x4727	/* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index b86500b..95a7fdb 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -2137,7 +2137,7 @@
 			struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	/* Aggregation is implemented fully in firmware,
 	 * including block ack negotiation. Do not allow
diff --git a/drivers/net/wireless/cw1200/sta.h b/drivers/net/wireless/cw1200/sta.h
index b7e386b..bebb337 100644
--- a/drivers/net/wireless/cw1200/sta.h
+++ b/drivers/net/wireless/cw1200/sta.h
@@ -111,7 +111,7 @@
 			struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size);
+			u8 buf_size, bool amsdu);
 
 void cw1200_suspend_resume(struct cw1200_common *priv,
 			  struct wsm_suspend_resume *arg);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 39f3e6f..ed0adaf 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -10470,7 +10470,6 @@
 		 vers, date);
 	strlcpy(info->bus_info, pci_name(p->pci_dev),
 		sizeof(info->bus_info));
-	info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
 }
 
 static u32 ipw_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index a6877dd..cef7f7d 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1091,8 +1091,6 @@
 		MFIE_STRING(TIM);
 		MFIE_STRING(IBSS_PARAMS);
 		MFIE_STRING(COUNTRY);
-		MFIE_STRING(HP_PARAMS);
-		MFIE_STRING(HP_TABLE);
 		MFIE_STRING(REQUEST);
 		MFIE_STRING(CHALLENGE);
 		MFIE_STRING(PWR_CONSTRAINT);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 44fa422..6656215 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5984,7 +5984,7 @@
 il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	struct il_priv *il = hw->priv;
 	int ret = -EINVAL;
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
index 3a57f71..8ab8706 100644
--- a/drivers/net/wireless/iwlegacy/4965.h
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -184,7 +184,7 @@
 int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			    enum ieee80211_ampdu_mlme_action action,
 			    struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-			    u8 buf_size);
+			    u8 buf_size, bool amsdu);
 int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
 void
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index aba0957..6e949df 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -142,6 +142,7 @@
 config IWLWIFI_DEVICE_TRACING
 	bool "iwlwifi device access tracing"
 	depends on EVENT_TRACING
+	default y
 	help
 	  Say Y here to trace all commands, including TX frames and IO
 	  accesses, sent to the device. If you say yes, iwlwifi will
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index ab45819..e18629a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1020,7 +1020,7 @@
 			u8 *pn = seq.ccmp.pn;
 
 			ieee80211_get_key_rx_seq(key, i, &seq);
-			aes_sc->pn = cpu_to_le64(
+			aes_sc[i].pn = cpu_to_le64(
 					(u64)pn[5] |
 					((u64)pn[4] << 8) |
 					((u64)pn[3] << 16) |
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 453f7c3..b3ad34e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -731,7 +731,7 @@
 				   struct ieee80211_vif *vif,
 				   enum ieee80211_ampdu_mlme_action action,
 				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				   u8 buf_size)
+				   u8 buf_size, bool amsdu)
 {
 	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
 	int ret = -EINVAL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 6951aba..1a73c7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -72,12 +72,10 @@
 #define IWL7260_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK	12
-#define IWL3165_UCODE_API_OK	13
+#define IWL7260_UCODE_API_OK	13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	12
-#define IWL3165_UCODE_API_MIN	13
+#define IWL7260_UCODE_API_MIN	13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION		0x0a1d
@@ -113,7 +111,7 @@
 
 static const struct iwl_base_params iwl7000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
-	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_queues = 31,
 	.pll_cfg_val = 0,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
@@ -269,11 +267,6 @@
 	.name = "Intel(R) Dual Band Wireless AC 3165",
 	.fw_name_pre = IWL7265D_FW_PRE,
 	IWL_DEVICE_7000,
-	/* sparse doens't like the re-assignment but it is safe */
-#ifndef __CHECKER__
-	.ucode_api_ok = IWL3165_UCODE_API_OK,
-	.ucode_api_min = IWL3165_UCODE_API_MIN,
-#endif
 	.ht_params = &iwl7000_ht_params,
 	.nvm_ver = IWL3165_NVM_VERSION,
 	.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -348,6 +341,6 @@
 };
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 197abe4..0116e5a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -72,10 +72,10 @@
 #define IWL8000_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK	12
+#define IWL8000_UCODE_API_OK	13
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	12
+#define IWL8000_UCODE_API_MIN	13
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION		0x0a1d
@@ -107,7 +107,7 @@
 
 static const struct iwl_base_params iwl8000_base_params = {
 	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
-	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.num_of_queues = 31,
 	.pll_cfg_val = 0,
 	.shadow_ram_support = true,
 	.led_compensation = 57,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 939fa22..9109708 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -223,13 +223,13 @@
  * @support_tx_backoff: Support tx-backoff?
  */
 struct iwl_tt_params {
-	s32 ct_kill_entry;
-	s32 ct_kill_exit;
+	u32 ct_kill_entry;
+	u32 ct_kill_exit;
 	u32 ct_kill_duration;
-	s32 dynamic_smps_entry;
-	s32 dynamic_smps_exit;
-	s32 tx_protection_entry;
-	s32 tx_protection_exit;
+	u32 dynamic_smps_entry;
+	u32 dynamic_smps_exit;
+	u32 tx_protection_entry;
+	u32 tx_protection_exit;
 	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
 	bool support_ct_kill;
 	bool support_dynamic_smps;
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index a86aa5b..463cadf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -450,7 +450,7 @@
 	u32 api_flags = le32_to_cpu(ucode_api->api_flags);
 	int i;
 
-	if (api_index >= IWL_API_MAX_BITS / 32) {
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
 		/* don't return an error so we can load FW that has more bits */
 		return 0;
@@ -472,7 +472,7 @@
 	u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
 	int i;
 
-	if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
 		IWL_ERR(drv, "api_index larger than supported by driver\n");
 		/* don't return an error so we can load FW that has more bits */
 		return 0;
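
The bound checked above follows from the storage layout: the parser keeps one 32-bit flags word per TLV index, so any index must stay below DIV_ROUND_UP(number-of-bits, 32). A small C sketch of that arithmetic, with NUM_API_BITS as an illustrative stand-in for NUM_IWL_UCODE_TLV_API:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define NUM_API_BITS		66	/* e.g. highest bit used + 1 */

static unsigned int api_words[DIV_ROUND_UP(NUM_API_BITS, 32)];

static int set_api_word(unsigned int api_index, unsigned int flags)
{
	if (api_index >= DIV_ROUND_UP(NUM_API_BITS, 32))
		return 0;	/* tolerate newer firmware: ignore extra words */

	api_words[api_index] |= flags;
	return 0;
}

int main(void)
{
	set_api_word(2, 0x1);	/* in range: 66 bits -> 3 words */
	set_api_word(3, 0x1);	/* out of range: silently ignored */
	printf("%u words\n",
	       (unsigned)(sizeof(api_words) / sizeof(api_words[0])));
	return 0;
}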
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index af5b320..9dbe19c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -86,6 +86,8 @@
  *	Structured as &struct iwl_fw_error_dump_trigger_desc.
  * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
  *	&struct iwl_fw_error_dump_rb
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
+ *	paged to the DRAM.
  */
 enum iwl_fw_error_dump_type {
 	/* 0 is deprecated */
@@ -100,6 +102,7 @@
 	IWL_FW_ERROR_DUMP_MEM = 9,
 	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
 	IWL_FW_ERROR_DUMP_RB = 11,
+	IWL_FW_ERROR_DUMP_PAGING = 12,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -240,6 +243,19 @@
 };
 
 /**
+ * struct iwl_fw_error_dump_paging - content of the UMAC's image page
+ *	block on DRAM
+ * @index: the index of the page block
+ * @reserved: reserved for future use
+ * @data: the content of the page block
+ */
+struct iwl_fw_error_dump_paging {
+	__le32 index;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
  * Returns: next data block
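
struct iwl_fw_error_dump_paging above uses a C99 flexible array member: a fixed header (index, reserved) directly followed by a variable-size page payload, allocated as one block. A self-contained sketch of the pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dump_paging {
	uint32_t index;		/* which page block this is */
	uint32_t reserved;
	uint8_t data[];		/* page contents follow the header */
};

int main(void)
{
	const size_t page_size = 4096;
	struct dump_paging *blk = malloc(sizeof(*blk) + page_size);

	if (!blk)
		return 1;
	blk->index = 0;
	blk->reserved = 0;
	memset(blk->data, 0xab, page_size);	/* stand-in page contents */
	printf("block %u, %zu payload bytes\n",
	       (unsigned)blk->index, page_size);
	free(blk);
	return 0;
}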
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 84653e3..08303db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -247,36 +247,31 @@
  * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
  *	longer than the passive one, which is essential for fragmented scan.
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
- * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
- * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
  * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
- * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
- *	through the dedicated host command.
- * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
- * @IWL_UCODE_TLV_API_ASYNC_DTM: Async temperature notifications are supported.
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
- * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
  *	instead of 3.
  * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
  *	(command version 3) that supports per-chain limits
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
 	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
-	IWL_UCODE_TLV_API_HDC_PHASE_0		= (__force iwl_ucode_tlv_api_t)10,
-	IWL_UCODE_TLV_API_TX_POWER_DEV		= (__force iwl_ucode_tlv_api_t)11,
 	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
-	IWL_UCODE_TLV_API_SCD_CFG		= (__force iwl_ucode_tlv_api_t)15,
-	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= (__force iwl_ucode_tlv_api_t)16,
-	IWL_UCODE_TLV_API_ASYNC_DTM		= (__force iwl_ucode_tlv_api_t)17,
 	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
-	IWL_UCODE_TLV_API_STATS_V10		= (__force iwl_ucode_tlv_api_t)19,
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
 	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
+
+	NUM_IWL_UCODE_TLV_API
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -311,6 +306,10 @@
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ *
+ * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
  */
 enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
@@ -333,6 +332,14 @@
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
 	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
+	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
+	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
+
+	NUM_IWL_UCODE_TLV_CAPA
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -343,9 +350,6 @@
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH	200
 
-#define IWL_API_MAX_BITS		64
-#define IWL_CAPABILITIES_MAX_BITS	64
-
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 45e7321..84ec0ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -105,8 +105,8 @@
 	u32 n_scan_channels;
 	u32 standard_phy_calibration_size;
 	u32 flags;
-	unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
-	unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
+	unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+	unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
 };
 
 static inline bool
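
Sizing _api[] and _capa[] from the enum terminators means adding a new enumerator automatically grows the bitmaps. A sketch of the idiom, assuming a userspace reimplementation of BITS_TO_LONGS; the enumerator names are illustrative:

#include <limits.h>
#include <stdio.h>

#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

enum capa_bits {
	CAPA_D0I3	= 0,
	CAPA_LAR	= 1,
	CAPA_EXT_DTS	= 64,	/* sparse gaps are fine: array sizes to fit */
	NUM_CAPA_BITS		/* terminator: always last */
};

static unsigned long capa[BITS_TO_LONGS(NUM_CAPA_BITS)];

static void set_capa(unsigned int bit)
{
	capa[bit / (CHAR_BIT * sizeof(long))] |=
		1UL << (bit % (CHAR_BIT * sizeof(long)));
}

static int has_capa(unsigned int bit)
{
	return !!(capa[bit / (CHAR_BIT * sizeof(long))] &
		  (1UL << (bit % (CHAR_BIT * sizeof(long)))));
}

int main(void)
{
	set_capa(CAPA_EXT_DTS);
	printf("ext_dts=%d lar=%d\n",
	       has_capa(CAPA_EXT_DTS), has_capa(CAPA_LAR));	/* 1 0 */
	return 0;
}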
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 27c66e4..0bd9d4a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -36,6 +36,29 @@
 #include "iwl-prph.h"
 #include "iwl-fh.h"
 
+void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+	trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
+	iwl_trans_write8(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_write8);
+
+void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
+	iwl_trans_write32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_write32);
+
+u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
+{
+	u32 val = iwl_trans_read32(trans, ofs);
+
+	trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
+	return val;
+}
+IWL_EXPORT_SYMBOL(iwl_read32);
+
 #define IWL_POLL_INTERVAL 10	/* microseconds */
 
 int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 705d12c..501d056 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -32,24 +32,9 @@
 #include "iwl-devtrace.h"
 #include "iwl-trans.h"
 
-static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
-{
-	trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
-	iwl_trans_write8(trans, ofs, val);
-}
-
-static inline void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
-{
-	trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
-	iwl_trans_write32(trans, ofs, val);
-}
-
-static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
-{
-	u32 val = iwl_trans_read32(trans, ofs);
-	trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
-	return val;
-}
+void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
 
 static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
 {
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 3b8e85e..d829849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -580,13 +580,15 @@
 	IWL_ERR_DEV(dev, "mac address is not found\n");
 }
 
+#define IWL_4165_DEVICE_ID 0x5501
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
 		   const __le16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-		   u32 mac_addr0, u32 mac_addr1)
+		   u32 mac_addr0, u32 mac_addr1, u32 hw_id)
 {
 	struct iwl_nvm_data *data;
 	u32 sku;
@@ -625,6 +627,17 @@
 				    (sku & NVM_SKU_CAP_11AC_ENABLE);
 	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
+	/*
+	 * Workaround for the OTP 0x52 bug:
+	 * if MIMO is disabled, advertise a 1x1 antenna configuration
+	 */
+	if (hw_id == IWL_4165_DEVICE_ID && data->sku_cap_mimo_disabled) {
+		data->valid_tx_ant = ANT_B;
+		data->valid_rx_ant = ANT_B;
+		tx_chains = ANT_B;
+		rx_chains = ANT_B;
+	}
+
 	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
 	if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index 822ba52..9f44d81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -79,7 +79,7 @@
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
 		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-		   u32 mac_addr0, u32 mac_addr1);
+		   u32 mac_addr0, u32 mac_addr1, u32 hw_id);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b47fe9d..2a58d68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,7 +110,8 @@
  * interact with it. The driver layer typically calls the start and stop
  * handlers, the transport layer calls the others.
  *
- * All the handlers MUST be implemented
+ * All the handlers MUST be implemented, except @rx_rss which can be left
+ * out *iff* the opmode will never run on hardware with multi-queue capability.
  *
  * @start: start the op_mode. The transport layer is already allocated.
  *	May sleep
@@ -116,6 +119,10 @@
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to. Can't sleep.
+ * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
+ *	received on the RSS queue(s). The queue parameter indicates which of the
+ *	RSS queues received this frame; it will always be non-zero.
+ *	This method must not sleep.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -146,6 +153,8 @@
 	void (*stop)(struct iwl_op_mode *op_mode);
 	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
 		   struct iwl_rx_cmd_buffer *rxb);
+	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -186,6 +195,14 @@
 	return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
+static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
+				      struct napi_struct *napi,
+				      struct iwl_rx_cmd_buffer *rxb,
+				      unsigned int queue)
+{
+	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
+}
+
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
 					  int queue)
 {
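
Per the updated contract above, @rx_rss is the one optional handler: an op mode that never runs on multi-queue hardware may leave it NULL. A hedged sketch of an ops table with an optional member and a defensive dispatcher; note the real iwl_op_mode_rx_rss() calls the handler unconditionally, so the guard here is illustrative only.

#include <stddef.h>
#include <stdio.h>

struct op_mode;

struct op_mode_ops {
	void (*rx)(struct op_mode *op);				/* mandatory */
	void (*rx_rss)(struct op_mode *op, unsigned int queue);	/* optional */
};

struct op_mode {
	const struct op_mode_ops *ops;
};

static void rx_default(struct op_mode *op)
{
	(void)op;
	printf("default queue frame\n");
}

static const struct op_mode_ops single_queue_ops = {
	.rx = rx_default,
	/* .rx_rss left NULL: never runs on multi-queue hardware */
};

static void dispatch_rss(struct op_mode *op, unsigned int queue)
{
	if (op->ops->rx_rss)
		op->ops->rx_rss(op, queue);
	else
		printf("no rx_rss handler, dropping queue %u frame\n", queue);
}

int main(void)
{
	struct op_mode op = { .ops = &single_queue_ops };

	op.ops->rx(&op);
	dispatch_rss(&op, 1);
	return 0;
}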
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 9f8bcef..7161096 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -87,6 +87,7 @@
 	trans->cfg = cfg;
 	trans->ops = ops;
 	trans->dev_cmd_headroom = dev_cmd_headroom;
+	trans->num_rx_queues = 1;
 
 	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
 		 "iwl_cmd_pool:%s", dev_name(trans->dev));
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c829c50..6f76525 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -386,6 +386,7 @@
 #define IWL_MAX_HW_QUEUES		32
 #define IWL_MAX_TID_COUNT	8
 #define IWL_FRAME_LIMIT	64
+#define IWL_MAX_RX_HW_QUEUES	16
 
 /**
  * enum iwl_wowlan_status - WoWLAN image/device status
@@ -408,6 +409,7 @@
  * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
  *	are sent
  * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
+ * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
  */
 enum iwl_trans_status {
 	STATUS_SYNC_HCMD_ACTIVE,
@@ -418,6 +420,7 @@
 	STATUS_FW_ERROR,
 	STATUS_TRANS_GOING_IDLE,
 	STATUS_TRANS_IDLE,
+	STATUS_TRANS_DEAD,
 };
 
 /**
@@ -654,6 +657,8 @@
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
  * @ltr_enabled: set to true if the LTR is enabled
+ * @num_rx_queues: number of RX queues allocated by the transport;
+ *	the transport must set this before calling iwl_drv_start()
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *	The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -693,6 +698,8 @@
 	bool pm_support;
 	bool ltr_enabled;
 
+	u8 num_rx_queues;
+
 	/* The following fields are internal only */
 	struct kmem_cache *dev_cmd_pool;
 	size_t dev_cmd_headroom;
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index b8ee312..5c21231 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -71,6 +71,9 @@
 #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
+#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT	(2 * 1024) /* defined in TU */
+#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT	(40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE	0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_QUEUES		(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
@@ -101,7 +104,7 @@
 #define IWL_MVM_FW_BCAST_FILTER_PASS_ALL	0
 #define IWL_MVM_QUOTA_THRESHOLD			4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
-#define IWL_MVM_RS_DISABLE_P2P_MIMO		0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK	1
 #define IWL_MVM_TOF_IS_RESPONDER		0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 04264e4..85ae902 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -274,18 +274,13 @@
 		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 		if (sta) {
-			u8 *pn = seq.ccmp.pn;
+			u64 pn64;
 
 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
 			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
 
-			ieee80211_get_key_tx_seq(key, &seq);
-			aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
-						    ((u64)pn[4] << 8) |
-						    ((u64)pn[3] << 16) |
-						    ((u64)pn[2] << 24) |
-						    ((u64)pn[1] << 32) |
-						    ((u64)pn[0] << 40));
+			pn64 = atomic64_read(&key->tx_pn);
+			aes_tx_sc->pn = cpu_to_le64(pn64);
 		} else {
 			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
 		}
@@ -298,12 +293,12 @@
 			u8 *pn = seq.ccmp.pn;
 
 			ieee80211_get_key_rx_seq(key, i, &seq);
-			aes_sc->pn = cpu_to_le64((u64)pn[5] |
-						 ((u64)pn[4] << 8) |
-						 ((u64)pn[3] << 16) |
-						 ((u64)pn[2] << 24) |
-						 ((u64)pn[1] << 32) |
-						 ((u64)pn[0] << 40));
+			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+						   ((u64)pn[4] << 8) |
+						   ((u64)pn[3] << 16) |
+						   ((u64)pn[2] << 24) |
+						   ((u64)pn[1] << 32) |
+						   ((u64)pn[0] << 40));
 		}
 		data->use_rsc_tsc = true;
 		break;
@@ -1170,6 +1165,9 @@
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
 
+	/* make sure the d0i3 exit work is not pending */
+	flush_work(&mvm->d0i3_exit_work);
+
 	ret = iwl_trans_suspend(mvm->trans);
 	if (ret)
 		return ret;
@@ -1453,15 +1451,15 @@
 
 		switch (key->cipher) {
 		case WLAN_CIPHER_SUITE_CCMP:
-			iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
 			iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
 			break;
 		case WLAN_CIPHER_SUITE_TKIP:
 			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
 			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+			ieee80211_set_key_tx_seq(key, &seq);
 			break;
 		}
-		ieee80211_set_key_tx_seq(key, &seq);
 
 		/* that's it for this key */
 		return;
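
Both CCMP hunks above assemble the 6-byte packet number MSB-first (pn[0] is the most significant byte) into a 64-bit counter, and the RX loop now indexes aes_sc[i] so each replay counter lands in its own slot instead of overwriting the first. A worked example of the byte-to-u64 assembly:

#include <stdint.h>
#include <stdio.h>

static uint64_t ccmp_pn_to_u64(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };

	/* pn[4]=0x01 contributes bits 8..15, pn[5]=0x02 bits 0..7 */
	printf("0x%llx\n", (unsigned long long)ccmp_pn_to_u64(pn)); /* 0x102 */
	return 0;
}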
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 383a316..7904b41 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -511,7 +511,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = -EINVAL;
+	u32 value;
+	int ret = -EINVAL;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -599,7 +600,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -713,11 +715,30 @@
 		goto out;
 	}
 
-	data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+	data = iwl_dbgfs_is_match("center_freq=", buf);
 	if (data) {
+		struct iwl_tof_responder_config_cmd *cmd =
+			&mvm->tof_data.responder_cfg;
+
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0)
-			mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+		if (ret == 0 && value) {
+			enum ieee80211_band band = (cmd->channel_num <= 14) ?
+						   IEEE80211_BAND_2GHZ :
+						   IEEE80211_BAND_5GHZ;
+			struct ieee80211_channel chn = {
+				.band = band,
+				.center_freq = ieee80211_channel_to_frequency(
+					cmd->channel_num, band),
+				};
+			struct cfg80211_chan_def chandef = {
+				.chan =  &chn,
+				.center_freq1 =
+					ieee80211_channel_to_frequency(value,
+								       band),
+			};
+
+			cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
+		}
 		goto out;
 	}
 
@@ -822,7 +843,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -892,6 +914,7 @@
 			goto out;
 		}
 		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+		goto out;
 	}
 
 	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
@@ -903,21 +926,22 @@
 			goto out;
 		}
 		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+		goto out;
 	}
 
 	data = iwl_dbgfs_is_match("ap=", buf);
 	if (data) {
-		struct iwl_tof_range_req_ap_entry ap;
+		struct iwl_tof_range_req_ap_entry ap = {};
 		int size = sizeof(struct iwl_tof_range_req_ap_entry);
 		u16 burst_period;
 		u8 *mac = ap.bssid;
 		unsigned int i;
 
-		if (sscanf(data, "%u %hhd %hhx %hhx"
+		if (sscanf(data, "%u %hhd %hhd %hhd"
 			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
-			   "%hhx %hhx %hx"
-			   "%hhx %hhx %x"
-			   "%hhx %hhx %hhx %hhx",
+			   "%hhd %hhd %hd"
+			   "%hhd %hhd %d"
+			   "%hhx %hhd %hhd %hhd",
 			   &i, &ap.channel_num, &ap.bandwidth,
 			   &ap.ctrl_ch_position,
 			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
@@ -944,12 +968,12 @@
 	data = iwl_dbgfs_is_match("send_range_request=", buf);
 	if (data) {
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0 && value) {
+		if (ret == 0 && value)
 			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
-			goto out;
-		}
+		goto out;
 	}
 
+	ret = -EINVAL;
 out:
 	mutex_unlock(&mvm->mutex);
 	return ret ?: count;
@@ -994,16 +1018,18 @@
 		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"ap %.2d: channel_num=%hhx bw=%hhx"
-				" control=%hhx bssid=%pM type=%hhx"
-				" num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
-				" retries=%hhx tsf_delta=%x location_req=%hhx "
-				" asap=%hhx enable=%hhx rssi=%hhx\n",
+				"ap %.2d: channel_num=%hhd bw=%hhd"
+				" control=%hhd bssid=%pM type=%hhd"
+				" num_of_bursts=%hhd burst_period=%hd ftm=%hhd"
+				" retries=%hhd tsf_delta=%d"
+				" tsf_delta_direction=%hhd location_req=0x%hhx "
+				" asap=%hhd enable=%hhd rssi=%hhd\n",
 				i, ap->channel_num, ap->bandwidth,
 				ap->ctrl_ch_position, ap->bssid,
 				ap->measure_type, ap->num_of_bursts,
 				ap->burst_period, ap->samples_per_burst,
 				ap->retries_per_sample, ap->tsf_delta,
+				ap->tsf_delta_direction,
 				ap->location_req, ap->asap_mode,
 				ap->enable_dyn_ack, ap->rssi);
 	}
@@ -1019,7 +1045,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
+	u32 value;
+	int ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -1071,12 +1098,12 @@
 	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
 	if (data) {
 		ret = kstrtou32(data, 10, &value);
-		if (ret == 0 && value) {
+		if (ret == 0 && value)
 			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
-			goto out;
-		}
+		goto out;
 	}
 
+	ret = -EINVAL;
 out:
 	mutex_unlock(&mvm->mutex);
 	return ret ?: count;
@@ -1099,18 +1126,18 @@
 	mutex_lock(&mvm->mutex);
 
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "tsf_timer_offset_msec = %hx\n",
+			 "tsf_timer_offset_msec = %hd\n",
 			 cmd->tsf_timer_offset_msec);
-	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
 			 cmd->min_delta_ftm);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw20M = %hhx\n",
+			 "ftm_format_and_bw20M = %hhd\n",
 			 cmd->ftm_format_and_bw20M);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw40M = %hhx\n",
+			 "ftm_format_and_bw40M = %hhd\n",
 			 cmd->ftm_format_and_bw40M);
 	pos += scnprintf(buf + pos, bufsz - pos,
-			 "ftm_format_and_bw80M = %hhx\n",
+			 "ftm_format_and_bw80M = %hhd\n",
 			 cmd->ftm_format_and_bw80M);
 
 	mutex_unlock(&mvm->mutex);
@@ -1123,8 +1150,8 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = mvmvif->mvm;
-	int value, ret = 0;
-	int abort_id;
+	u32 value;
+	int abort_id, ret = 0;
 	char *data;
 
 	mutex_lock(&mvm->mutex);
@@ -1205,11 +1232,11 @@
 		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
 
 		pos += scnprintf(buf + pos, bufsz - pos,
-				"ap %.2d: bssid=%pM status=%hhx bw=%hhx"
-				" rtt=%x rtt_var=%x rtt_spread=%x"
-				" rssi=%hhx  rssi_spread=%hhx"
-				" range=%x range_var=%x"
-				" time_stamp=%x\n",
+				"ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+				" rtt=%d rtt_var=%d rtt_spread=%d"
+				" rssi=%hhd  rssi_spread=%hhd"
+				" range=%d range_var=%d"
+				" time_stamp=%d\n",
 				i, ap->bssid, ap->measure_status,
 				ap->measure_bw,
 				ap->rtt, ap->rtt_variance, ap->rtt_spread,
@@ -1250,11 +1277,10 @@
 {
 	struct ieee80211_vif *vif = file->private_data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	char buf[3];
+	char buf[2];
 
 	buf[0] = mvmvif->low_latency ? '1' : '0';
 	buf[1] = '\n';
-	buf[2] = '\0';
 	return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
 }
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 7d69a55..05928fb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -85,7 +85,7 @@
 	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
 
 	mutex_lock(&mvm->mutex);
-	ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+	ret =  iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
 	mutex_unlock(&mvm->mutex);
 
 	return ret;
@@ -1214,118 +1214,6 @@
 
 	return ret;
 }
-
-#define MAX_NUM_ND_MATCHSETS 10
-
-static ssize_t iwl_dbgfs_netdetect_write(struct iwl_mvm *mvm, char *buf,
-					 size_t count, loff_t *ppos)
-{
-	const char *seps = ",\n";
-	char *buf_ptr = buf;
-	char *value_str = NULL;
-	int ret, i;
-
-	/* TODO: don't free if write is being called several times in one go */
-	if (mvm->nd_config) {
-		kfree(mvm->nd_config->match_sets);
-		kfree(mvm->nd_config);
-		mvm->nd_config = NULL;
-	}
-
-	mvm->nd_config = kzalloc(sizeof(*mvm->nd_config) +
-				 (11 * sizeof(struct ieee80211_channel *)),
-				 GFP_KERNEL);
-	if (!mvm->nd_config) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
-
-	mvm->nd_config->n_channels = 11;
-	mvm->nd_config->scan_width = NL80211_BSS_CHAN_WIDTH_20;
-	mvm->nd_config->interval = 5;
-	mvm->nd_config->min_rssi_thold = -80;
-	for (i = 0; i < mvm->nd_config->n_channels; i++)
-		mvm->nd_config->channels[i] = &mvm->nvm_data->channels[i];
-
-	mvm->nd_config->match_sets =
-		kcalloc(MAX_NUM_ND_MATCHSETS,
-			sizeof(*mvm->nd_config->match_sets),
-			GFP_KERNEL);
-	if (!mvm->nd_config->match_sets) {
-		ret = -ENOMEM;
-		goto out_free;
-	}
-
-	while ((value_str = strsep(&buf_ptr, seps)) &&
-	       strlen(value_str)) {
-		struct cfg80211_match_set *set;
-
-		if (mvm->nd_config->n_match_sets >= MAX_NUM_ND_MATCHSETS) {
-			ret = -EINVAL;
-			goto out_free;
-		}
-
-		set = &mvm->nd_config->match_sets[mvm->nd_config->n_match_sets];
-		set->ssid.ssid_len = strlen(value_str);
-
-		if (set->ssid.ssid_len > IEEE80211_MAX_SSID_LEN) {
-			ret = -EINVAL;
-			goto out_free;
-		}
-
-		memcpy(set->ssid.ssid, value_str, set->ssid.ssid_len);
-
-		mvm->nd_config->n_match_sets++;
-	}
-
-	ret = count;
-
-	if (mvm->nd_config->n_match_sets)
-		goto out;
-
-out_free:
-	if (mvm->nd_config)
-		kfree(mvm->nd_config->match_sets);
-	kfree(mvm->nd_config);
-	mvm->nd_config = NULL;
-out:
-	return ret;
-}
-
-static ssize_t
-iwl_dbgfs_netdetect_read(struct file *file, char __user *user_buf,
-			 size_t count, loff_t *ppos)
-{
-	struct iwl_mvm *mvm = file->private_data;
-	size_t bufsz, ret;
-	char *buf;
-	int i, n_match_sets, pos = 0;
-
-	n_match_sets = mvm->nd_config ? mvm->nd_config->n_match_sets : 0;
-
-	bufsz = n_match_sets * (IEEE80211_MAX_SSID_LEN + 1) + 1;
-	buf = kzalloc(bufsz, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	for (i = 0; i < n_match_sets; i++) {
-		if (pos +
-		    mvm->nd_config->match_sets[i].ssid.ssid_len + 2 > bufsz) {
-			ret = -EIO;
-			goto out;
-		}
-
-		memcpy(buf + pos, mvm->nd_config->match_sets[i].ssid.ssid,
-		       mvm->nd_config->match_sets[i].ssid.ssid_len);
-		pos += mvm->nd_config->match_sets[i].ssid.ssid_len;
-		buf[pos++] = '\n';
-	}
-
-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-out:
-	kfree(buf);
-	return ret;
-}
 #endif
 
 #define PRINT_MVM_REF(ref) do {						\
@@ -1473,11 +1361,25 @@
 	return count;
 }
 
+static ssize_t
+iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
+			      size_t count, loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
 
 /* Device wide debugfs entries */
 MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
 MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
 MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
@@ -1503,7 +1405,6 @@
 
 #ifdef CONFIG_PM_SLEEP
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(netdetect, 384);
 #endif
 
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
@@ -1538,6 +1439,7 @@
 	MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 	MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
+	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
 	if (!debugfs_create_bool("enable_scan_iteration_notif",
 				 S_IRUSR | S_IWUSR,
 				 mvm->debugfs_dir,
@@ -1572,7 +1474,6 @@
 	if (!debugfs_create_u32("last_netdetect_scans", S_IRUSR,
 				mvm->debugfs_dir, &mvm->last_netdetect_scans))
 		goto err;
-	MVM_DEBUGFS_ADD_FILE(netdetect, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
 #endif
 
 	if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR,
@@ -1594,6 +1495,9 @@
 	if (!debugfs_create_blob("nvm_prod", S_IRUSR,
 				  mvm->debugfs_dir, &mvm->nvm_prod_blob))
 		goto err;
+	if (!debugfs_create_blob("nvm_phy_sku", S_IRUSR,
+				 mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
+		goto err;
 
 	/*
 	 * Create a symlink with mac80211. It will be removed when mac80211
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 7005fa4..c8f3e25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -192,16 +192,10 @@
 /**
  * enum iwl_device_power_flags - masks for device power command flags
  * @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
- *	receiver and transmitter. '0' - does not allow. This flag should be
- *	always set to '1' unless one need to disable actual power down for debug
- *	purposes.
- * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
- *	that power management is disabled. '0' Power management is enabled, one
- *	of power schemes is applied.
+ *	receiver and transmitter. '0' - does not allow.
 */
 enum iwl_device_power_flags {
 	DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK	= BIT(0),
-	DEVICE_POWER_FLAGS_CAM_MSK		= BIT(13),
 };
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h
new file mode 100644
index 0000000..9b7e49d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rx.h
@@ -0,0 +1,238 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_rx_h__
+#define __fw_api_rx_h__
+
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ *	calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+	u8 non_cfg_phy_cnt;
+	u8 cfg_phy_cnt;
+	u8 stat_id;
+	u8 reserved1;
+	__le32 system_timestamp;
+	__le64 timestamp;
+	__le32 beacon_time_stamp;
+	__le16 phy_flags;
+	__le16 channel;
+	__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+	__le32 rate_n_flags;
+	__le32 byte_count;
+	__le16 mac_active_msk;
+	__le16 frame_time;
+} __packed;
+
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-MSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+	CSUM_RXA_RESERVED_MASK	= 0x000f,
+	CSUM_RXA_MICSIZE_MASK	= 0x00f0,
+	CSUM_RXA_HEADERLEN_MASK	= 0x1f00,
+	CSUM_RXA_PADD		= BIT(13),
+	CSUM_RXA_AMSDU		= BIT(14),
+	CSUM_RXA_ENA		= BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - Rx MPDU response start
+ * @byte_count: byte count of the frame
+ * @assist: see &enum iwl_csum_rx_assist_info above
+ */
+struct iwl_rx_mpdu_res_start {
+	__le16 byte_count;
+	__le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
+
+/**
+ * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @RX_RES_PHY_FLAGS_MOD_CCK:
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+	RX_RES_PHY_FLAGS_BAND_24	= BIT(0),
+	RX_RES_PHY_FLAGS_MOD_CCK	= BIT(1),
+	RX_RES_PHY_FLAGS_SHORT_PREAMBLE	= BIT(2),
+	RX_RES_PHY_FLAGS_NARROW_BAND	= BIT(3),
+	RX_RES_PHY_FLAGS_ANTENNA	= (0x7 << 4),
+	RX_RES_PHY_FLAGS_ANTENNA_POS	= 4,
+	RX_RES_PHY_FLAGS_AGG		= BIT(7),
+	RX_RES_PHY_FLAGS_OFDM_HT	= BIT(8),
+	RX_RES_PHY_FLAGS_OFDM_GF	= BIT(9),
+	RX_RES_PHY_FLAGS_OFDM_VHT	= BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @RX_MPDU_RES_STATUS_KEY_VALID:
+ * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ *	in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR:  valid for alg = CCM_CMAC or
+ *	alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ *	%RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
+ * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @RX_MPDU_RES_STATUS_RRF_KILL:
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+enum iwl_mvm_rx_status {
+	RX_MPDU_RES_STATUS_CRC_OK			= BIT(0),
+	RX_MPDU_RES_STATUS_OVERRUN_OK			= BIT(1),
+	RX_MPDU_RES_STATUS_SRC_STA_FOUND		= BIT(2),
+	RX_MPDU_RES_STATUS_KEY_VALID			= BIT(3),
+	RX_MPDU_RES_STATUS_KEY_PARAM_OK			= BIT(4),
+	RX_MPDU_RES_STATUS_ICV_OK			= BIT(5),
+	RX_MPDU_RES_STATUS_MIC_OK			= BIT(6),
+	RX_MPDU_RES_STATUS_TTAK_OK			= BIT(7),
+	RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR		= BIT(7),
+	RX_MPDU_RES_STATUS_SEC_NO_ENC			= (0 << 8),
+	RX_MPDU_RES_STATUS_SEC_WEP_ENC			= (1 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_ENC			= (2 << 8),
+	RX_MPDU_RES_STATUS_SEC_TKIP_ENC			= (3 << 8),
+	RX_MPDU_RES_STATUS_SEC_EXT_ENC			= (4 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC		= (6 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_ERR			= (7 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_MSK			= (7 << 8),
+	RX_MPDU_RES_STATUS_DEC_DONE			= BIT(11),
+	RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP	= BIT(12),
+	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP		= BIT(13),
+	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT		= BIT(14),
+	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME		= BIT(15),
+	RX_MPDU_RES_STATUS_CSUM_DONE			= BIT(16),
+	RX_MPDU_RES_STATUS_CSUM_OK			= BIT(17),
+	RX_MPDU_RES_STATUS_HASH_INDEX_MSK		= (0x3F0000),
+	RX_MPDU_RES_STATUS_STA_ID_MSK			= (0x1f000000),
+	RX_MPDU_RES_STATUS_RRF_KILL			= BIT(29),
+	RX_MPDU_RES_STATUS_FILTERING_MSK		= (0xc00000),
+	RX_MPDU_RES_STATUS2_FILTERING_MSK		= (0xc0000000),
+};
+
+#endif /* __fw_api_rx_h__ */
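
The new header describes the Rx assist word and the per-antenna energy
fields purely through masks and shift positions. A minimal sketch of how
a consumer might unpack them, using only the definitions above (the
example_* helpers are illustrative, not part of this patch):

static u8 example_rxa_header_len(__le16 assist)
{
	u16 v = le16_to_cpu(assist);

	/* bits 8:12 of the assist word carry the MAC header length */
	return (v & CSUM_RXA_HEADERLEN_MASK) >> 8;
}

static u8 example_energy_ant_a(const struct iwl_rx_phy_info *phy_info)
{
	u32 val = le32_to_cpu(
		phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

	/* antenna A energy sits in the low byte of that word */
	return (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
		IWL_RX_INFO_ENERGY_ANT_A_POS;
}
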
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 660cc1c..3a657e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -101,6 +101,7 @@
 
 #define IWL_FULL_SCAN_MULTIPLIER 5
 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWL_MAX_SCHED_SCAN_PLANS 2
 
 enum scan_framework_client {
 	SCAN_CLIENT_SCHED_SCAN		= BIT(0),
@@ -359,7 +360,7 @@
 	/* SCAN_REQ_PERIODIC_PARAMS_API_S */
 	__le32 iter_num;
 	__le32 delay;
-	struct iwl_scan_schedule_lmac schedule[2];
+	struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
 	struct iwl_scan_channel_opt channel_opt[2];
 	u8 data[];
 } __packed;
@@ -582,7 +583,7 @@
  */
 struct iwl_scan_req_umac_tail {
 	/* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
-	struct iwl_scan_umac_schedule schedule[2];
+	struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
 	__le16 delay;
 	__le16 reserved;
 	/* SCAN_PROBE_PARAMS_API_S_VER_1 */
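
Replacing the magic "2" with IWL_MAX_SCHED_SCAN_PLANS lets
schedule-filling code bound its loops on the named constant. A sketch,
assuming the schedule entry carries a delay and an iteration count as
laid out elsewhere in this header (illustration only, not driver code):

static void example_fill_schedule(struct iwl_scan_schedule_lmac *sched,
				  u16 interval, u8 iterations)
{
	int i;

	/* every plan slot is configured; unused slots could stay zeroed */
	for (i = 0; i < IWL_MAX_SCHED_SCAN_PLANS; i++) {
		sched[i].delay = cpu_to_le16(interval);
		sched[i].iterations = iterations;
	}
}
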
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
index 709e28d..0c321f6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-stats.h
@@ -219,32 +219,6 @@
 	__le32 lo_priority_rx_denied_cnt;
 } __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
 
-struct mvm_statistics_general_v5 {
-	__le32 radio_temperature;
-	__le32 radio_voltage;
-	struct mvm_statistics_dbg dbg;
-	__le32 sleep_time;
-	__le32 slots_out;
-	__le32 slots_idle;
-	__le32 ttl_timestamp;
-	struct mvm_statistics_div slow_div;
-	__le32 rx_enable_counter;
-	/*
-	 * num_of_sos_states:
-	 *  count the number of times we have to re-tune
-	 *  in order to get out of bad PHY status
-	 */
-	__le32 num_of_sos_states;
-	__le32 beacon_filtered;
-	__le32 missed_beacons;
-	__s8 beacon_filter_average_energy;
-	__s8 beacon_filter_reason;
-	__s8 beacon_filter_current_energy;
-	__s8 beacon_filter_reserved;
-	__le32 beacon_filter_delta_time;
-	struct mvm_statistics_bt_activity bt_activity;
-} __packed; /* STATISTICS_GENERAL_API_S_VER_5 */
-
 struct mvm_statistics_general_v8 {
 	__le32 radio_temperature;
 	__le32 radio_voltage;
@@ -263,10 +237,10 @@
 	__le32 num_of_sos_states;
 	__le32 beacon_filtered;
 	__le32 missed_beacons;
-	__s8 beacon_filter_average_energy;
-	__s8 beacon_filter_reason;
-	__s8 beacon_filter_current_energy;
-	__s8 beacon_filter_reserved;
+	u8 beacon_filter_average_energy;
+	u8 beacon_filter_reason;
+	u8 beacon_filter_current_energy;
+	u8 beacon_filter_reserved;
 	__le32 beacon_filter_delta_time;
 	struct mvm_statistics_bt_activity bt_activity;
 	__le64 rx_time;
@@ -293,13 +267,6 @@
  * STATISTICS_CMD (0x9c), below.
  */
 
-struct iwl_notif_statistics_v8 {
-	__le32 flag;
-	struct mvm_statistics_rx rx;
-	struct mvm_statistics_tx tx;
-	struct mvm_statistics_general_v5 general;
-} __packed; /* STATISTICS_NTFY_API_S_VER_8 */
-
 struct iwl_notif_statistics_v10 {
 	__le32 flag;
 	struct mvm_statistics_rx rx;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 4af7513a..181590f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -67,6 +67,7 @@
 #define __fw_api_h__
 
 #include "fw-api-rs.h"
+#include "fw-api-rx.h"
 #include "fw-api-tx.h"
 #include "fw-api-sta.h"
 #include "fw-api-mac.h"
@@ -100,6 +101,7 @@
 enum {
 	MVM_ALIVE = 0x1,
 	REPLY_ERROR = 0x2,
+	ECHO_CMD = 0x3,
 
 	INIT_COMPLETE_NOTIF = 0x4,
 
@@ -266,6 +268,16 @@
 	REPLY_MAX = 0xff,
 };
 
+enum iwl_phy_ops_subcmd_ids {
+	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+};
+
+/* command groups */
+enum {
+	PHY_OPS_GROUP = 0x4,
+};
+
 /**
  * struct iwl_cmd_response - generic response struct for most commands
  * @status: status of the command asked, changes for each one
@@ -1070,190 +1082,6 @@
 	__le32 status;
 } __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
 
-#define IWL_RX_INFO_PHY_CNT 8
-#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
-#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
-#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
-#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
-#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
-#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
-#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
-
-#define IWL_RX_INFO_AGC_IDX 1
-#define IWL_RX_INFO_RSSI_AB_IDX 2
-#define IWL_OFDM_AGC_A_MSK 0x0000007f
-#define IWL_OFDM_AGC_A_POS 0
-#define IWL_OFDM_AGC_B_MSK 0x00003f80
-#define IWL_OFDM_AGC_B_POS 7
-#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
-#define IWL_OFDM_AGC_CODE_POS 20
-#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
-#define IWL_OFDM_RSSI_A_POS 0
-#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
-#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
-#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
-#define IWL_OFDM_RSSI_B_POS 16
-#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
-#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
-
-/**
- * struct iwl_rx_phy_info - phy info
- * (REPLY_RX_PHY_CMD = 0xc0)
- * @non_cfg_phy_cnt: non configurable DSP phy data byte count
- * @cfg_phy_cnt: configurable DSP phy data byte count
- * @stat_id: configurable DSP phy data set ID
- * @reserved1:
- * @system_timestamp: GP2  at on air rise
- * @timestamp: TSF at on air rise
- * @beacon_time_stamp: beacon at on-air rise
- * @phy_flags: general phy flags: band, modulation, ...
- * @channel: channel number
- * @non_cfg_phy_buf: for various implementations of non_cfg_phy
- * @rate_n_flags: RATE_MCS_*
- * @byte_count: frame's byte-count
- * @frame_time: frame's time on the air, based on byte count and frame rate
- *	calculation
- * @mac_active_msk: what MACs were active when the frame was received
- *
- * Before each Rx, the device sends this data. It contains PHY information
- * about the reception of the packet.
- */
-struct iwl_rx_phy_info {
-	u8 non_cfg_phy_cnt;
-	u8 cfg_phy_cnt;
-	u8 stat_id;
-	u8 reserved1;
-	__le32 system_timestamp;
-	__le64 timestamp;
-	__le32 beacon_time_stamp;
-	__le16 phy_flags;
-	__le16 channel;
-	__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
-	__le32 rate_n_flags;
-	__le32 byte_count;
-	__le16 mac_active_msk;
-	__le16 frame_time;
-} __packed;
-
-/*
- * TCP offload Rx assist info
- *
- * bits 0:3 - reserved
- * bits 4:7 - MIC CRC length
- * bits 8:12 - MAC header length
- * bit 13 - Padding indication
- * bit 14 - A-AMSDU indication
- * bit 15 - Offload enabled
- */
-enum iwl_csum_rx_assist_info {
-	CSUM_RXA_RESERVED_MASK	= 0x000f,
-	CSUM_RXA_MICSIZE_MASK	= 0x00f0,
-	CSUM_RXA_HEADERLEN_MASK	= 0x1f00,
-	CSUM_RXA_PADD		= BIT(13),
-	CSUM_RXA_AMSDU		= BIT(14),
-	CSUM_RXA_ENA		= BIT(15)
-};
-
-/**
- * struct iwl_rx_mpdu_res_start - phy info
- * @assist: see CSUM_RX_ASSIST_ above
- */
-struct iwl_rx_mpdu_res_start {
-	__le16 byte_count;
-	__le16 assist;
-} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
-
-/**
- * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
- * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
- * @RX_RES_PHY_FLAGS_MOD_CCK:
- * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
- * @RX_RES_PHY_FLAGS_NARROW_BAND:
- * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
- * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
- * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
- * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
- * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
- */
-enum iwl_rx_phy_flags {
-	RX_RES_PHY_FLAGS_BAND_24	= BIT(0),
-	RX_RES_PHY_FLAGS_MOD_CCK	= BIT(1),
-	RX_RES_PHY_FLAGS_SHORT_PREAMBLE	= BIT(2),
-	RX_RES_PHY_FLAGS_NARROW_BAND	= BIT(3),
-	RX_RES_PHY_FLAGS_ANTENNA	= (0x7 << 4),
-	RX_RES_PHY_FLAGS_ANTENNA_POS	= 4,
-	RX_RES_PHY_FLAGS_AGG		= BIT(7),
-	RX_RES_PHY_FLAGS_OFDM_HT	= BIT(8),
-	RX_RES_PHY_FLAGS_OFDM_GF	= BIT(9),
-	RX_RES_PHY_FLAGS_OFDM_VHT	= BIT(10),
-};
-
-/**
- * enum iwl_mvm_rx_status - written by fw for each Rx packet
- * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
- * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
- * @RX_MPDU_RES_STATUS_SRC_STA_FOUND:
- * @RX_MPDU_RES_STATUS_KEY_VALID:
- * @RX_MPDU_RES_STATUS_KEY_PARAM_OK:
- * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
- * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
- *	in the driver.
- * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
- * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR:  valid for alg = CCM_CMAC or
- *	alg = CCM only. Checks replay attack for 11w frames. Relevant only if
- *	%RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
- * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
- * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
- * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
- * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
- * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
- * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
- * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
- * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
- * @RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
- * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
- * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
- * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
- * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
- * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
- * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
- * @RX_MPDU_RES_STATUS_STA_ID_MSK:
- * @RX_MPDU_RES_STATUS_RRF_KILL:
- * @RX_MPDU_RES_STATUS_FILTERING_MSK:
- * @RX_MPDU_RES_STATUS2_FILTERING_MSK:
- */
-enum iwl_mvm_rx_status {
-	RX_MPDU_RES_STATUS_CRC_OK			= BIT(0),
-	RX_MPDU_RES_STATUS_OVERRUN_OK			= BIT(1),
-	RX_MPDU_RES_STATUS_SRC_STA_FOUND		= BIT(2),
-	RX_MPDU_RES_STATUS_KEY_VALID			= BIT(3),
-	RX_MPDU_RES_STATUS_KEY_PARAM_OK			= BIT(4),
-	RX_MPDU_RES_STATUS_ICV_OK			= BIT(5),
-	RX_MPDU_RES_STATUS_MIC_OK			= BIT(6),
-	RX_MPDU_RES_STATUS_TTAK_OK			= BIT(7),
-	RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR		= BIT(7),
-	RX_MPDU_RES_STATUS_SEC_NO_ENC			= (0 << 8),
-	RX_MPDU_RES_STATUS_SEC_WEP_ENC			= (1 << 8),
-	RX_MPDU_RES_STATUS_SEC_CCM_ENC			= (2 << 8),
-	RX_MPDU_RES_STATUS_SEC_TKIP_ENC			= (3 << 8),
-	RX_MPDU_RES_STATUS_SEC_EXT_ENC			= (4 << 8),
-	RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC		= (6 << 8),
-	RX_MPDU_RES_STATUS_SEC_ENC_ERR			= (7 << 8),
-	RX_MPDU_RES_STATUS_SEC_ENC_MSK			= (7 << 8),
-	RX_MPDU_RES_STATUS_DEC_DONE			= BIT(11),
-	RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP	= BIT(12),
-	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP		= BIT(13),
-	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT		= BIT(14),
-	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME		= BIT(15),
-	RX_MPDU_RES_STATUS_CSUM_DONE			= BIT(16),
-	RX_MPDU_RES_STATUS_CSUM_OK			= BIT(17),
-	RX_MPDU_RES_STATUS_HASH_INDEX_MSK		= (0x3F0000),
-	RX_MPDU_RES_STATUS_STA_ID_MSK			= (0x1f000000),
-	RX_MPDU_RES_STATUS_RRF_KILL			= BIT(29),
-	RX_MPDU_RES_STATUS_FILTERING_MSK		= (0xc00000),
-	RX_MPDU_RES_STATUS2_FILTERING_MSK		= (0xc0000000),
-};
-
 /**
  * struct iwl_radio_version_notif - information on the radio version
  * ( RADIO_VERSION_NOTIFICATION = 0x68 )
@@ -1696,6 +1524,69 @@
 } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
 
 /**
+* enum iwl_dts_control_measurement_mode - DTS measurement type
+* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read
+*                 back (latest value, not waiting for a new one). Use automatic
+*                 SW DTS configuration.
+* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
+*                    trigger DTS reading and provide the read-back temperature
+*                    when available.
+* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
+* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
+*                              without measurement trigger.
+*/
+enum iwl_dts_control_measurement_mode {
+	DTS_AUTOMATIC			= 0,
+	DTS_REQUEST_READ		= 1,
+	DTS_OVER_WRITE			= 2,
+	DTS_DIRECT_WITHOUT_MEASURE	= 3,
+};
+
+/**
+* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
+* @DTS_USE_TOP: Top
+* @DTS_USE_CHAIN_A: chain A
+* @DTS_USE_CHAIN_B: chain B
+* @DTS_USE_CHAIN_C: chain C
+* @XTAL_TEMPERATURE: read temperature from xtal
+*/
+enum iwl_dts_used {
+	DTS_USE_TOP		= 0,
+	DTS_USE_CHAIN_A		= 1,
+	DTS_USE_CHAIN_B		= 2,
+	DTS_USE_CHAIN_C		= 3,
+	XTAL_TEMPERATURE	= 4,
+};
+
+/**
+* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
+* @DTS_BIT6_MODE: bit 6 mode
+* @DTS_BIT8_MODE: bit 8 mode
+*/
+enum iwl_dts_bit_mode {
+	DTS_BIT6_MODE	= 0,
+	DTS_BIT8_MODE	= 1,
+};
+
+/**
+ * struct iwl_ext_dts_measurement_cmd - request extended DTS temperature measurements
+ * @control_mode: see &enum iwl_dts_control_measurement_mode
+ * @temperature: used when the over-write DTS mode is selected
+ * @sensor: set temperature sensor to use. See &enum iwl_dts_used
+ * @avg_factor: average factor to DTS in request DTS read mode
+ * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
+ * @step_duration: step duration for the DTS
+ */
+struct iwl_ext_dts_measurement_cmd {
+	__le32 control_mode;
+	__le32 temperature;
+	__le32 sensor;
+	__le32 avg_factor;
+	__le32 bit_mode;
+	__le32 step_duration;
+} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
+
+/**
  * iwl_dts_measurement_notif - notification received with the measurements
  *
  * @temp: the measured temperature
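
The extended DTS measurement command is a plain struct of __le32 fields,
so composing a manual read request reduces to filling it in. A sketch
with arbitrary averaging/step values; the helper name is hypothetical,
and the send path (iwl_mvm_send_cmd_pdu through the new PHY_OPS_GROUP)
is not shown:

static void example_build_dts_read(struct iwl_ext_dts_measurement_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->control_mode = cpu_to_le32(DTS_REQUEST_READ);
	cmd->sensor = cpu_to_le32(DTS_USE_CHAIN_A);
	cmd->bit_mode = cpu_to_le32(DTS_BIT8_MODE);
	cmd->avg_factor = cpu_to_le32(4);	/* arbitrary averaging */
	cmd->step_duration = cpu_to_le32(2);	/* arbitrary step */
}
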
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 4a0ce83..d906fa1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -616,12 +616,8 @@
 	 * will be empty.
 	 */
 
-	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-		if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
-			mvm->queue_to_mac80211[i] = i;
-		else
-			mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
-	}
+	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+	mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -703,7 +699,7 @@
 	 * abort after reading the nvm in case RF Kill is on, we will complete
 	 * the init seq later when RF kill will switch to off
 	 */
-	if (iwl_mvm_is_radio_killed(mvm)) {
+	if (iwl_mvm_is_radio_hw_killed(mvm)) {
 		IWL_DEBUG_RF_KILL(mvm,
 				  "jump over all phy activities due to RF kill\n");
 		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
@@ -736,7 +732,7 @@
 	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
 			MVM_UCODE_CALIB_TIMEOUT);
 
-	if (ret && iwl_mvm_is_radio_killed(mvm)) {
+	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
 		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
 		ret = 1;
 	}
@@ -940,19 +936,6 @@
 	return ret;
 }
 
-static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
-{
-	struct iwl_ltr_config_cmd_v1 cmd_v1 = {
-		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
-	};
-
-	if (!mvm->trans->ltr_enabled)
-		return 0;
-
-	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
-				    sizeof(cmd_v1), &cmd_v1);
-}
-
 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
 {
 	struct iwl_ltr_config_cmd cmd = {
@@ -962,9 +945,6 @@
 	if (!mvm->trans->ltr_enabled)
 		return 0;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
-		return iwl_mvm_config_ltr_v1(mvm);
-
 	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
 				    sizeof(cmd), &cmd);
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 3424315..ad7ad72 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -484,16 +486,18 @@
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
 		iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-				      IWL_MVM_TX_FIFO_VO, wdg_timeout);
+				      IWL_MVM_OFFCHANNEL_QUEUE,
+				      IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue,
-				      IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+		iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
+				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
-					      iwl_mvm_ac_to_tx_fifo[ac],
+					      vif->hw_queue[ac],
+					      iwl_mvm_ac_to_tx_fifo[ac], 0,
 					      wdg_timeout);
 		break;
 	}
@@ -509,14 +513,19 @@
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_P2P_DEVICE:
-		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, 0);
+		iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+				    IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
+				    0);
 		break;
 	case NL80211_IFTYPE_AP:
-		iwl_mvm_disable_txq(mvm, vif->cab_queue, 0);
+		iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+				    IWL_MAX_TID_COUNT, 0);
 		/* fall through */
 	default:
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], 0);
+			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
+					    vif->hw_queue[ac],
+					    IWL_MAX_TID_COUNT, 0);
 	}
 }
 
@@ -834,6 +843,9 @@
 	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
 	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
 
+	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
@@ -1128,6 +1140,7 @@
 				   struct ieee80211_vif *vif,
 				   u32 action)
 {
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mac_ctx_cmd cmd = {};
 
 	WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
@@ -1137,10 +1150,16 @@
 
 	/*
 	 * pass probe requests and beacons from other APs (needed
-	 * for ht protection)
+	 * for ht protection); when no stations are associated, don't
+	 * ask the FW to pass beacons, to prevent unnecessary wake-ups.
 	 */
-	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
-					MAC_FILTER_IN_BEACON);
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+	if (mvmvif->ap_assoc_sta_count) {
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+		IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+	} else {
+		IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+	}
 
 	/* Fill the data specific for ap mode */
 	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
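
The beacon filter now keys off mvmvif->ap_assoc_sta_count; the decrement
side appears in the mac80211.c hunk below. A sketch of what the add-side
counterpart could look like (an assumption, not taken from this patch):

static void example_sta_added(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type == NL80211_IFTYPE_AP) {
		/* track the new station and refresh the MAC context so
		 * the MAC_FILTER_IN_BEACON flag follows the count
		 */
		mvmvif->ap_assoc_sta_count++;
		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	}
}
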
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index aa8c2b7..1fb6846 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -572,6 +572,14 @@
 	/* we create the 802.11 header and zero length SSID IE. */
 	hw->wiphy->max_sched_scan_ie_len =
 		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
+
+	/*
+	 * the firmware uses u8 for num of iterations, but 0xff is saved for
+	 * infinite loop, so the maximum number of iterations is actually 254.
+	 */
+	hw->wiphy->max_sched_scan_plan_iterations = 254;
 
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -820,7 +828,7 @@
 				    struct ieee80211_vif *vif,
 				    enum ieee80211_ampdu_mlme_action action,
 				    struct ieee80211_sta *sta, u16 tid,
-				    u16 *ssn, u8 buf_size)
+				    u16 *ssn, u8 buf_size, bool amsdu)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	int ret;
@@ -1129,6 +1137,12 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
+	/* there's no point in fw dump if the bus is dead */
+	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
+		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
+		return;
+	}
+
 	if (mvm->fw_dump_trig &&
 	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
 		monitor_dump_only = true;
@@ -1192,6 +1206,13 @@
 	if (sram2_len)
 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
+	/* Make room for fw's virtual image pages, if the image is paged */
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+		file_len += mvm->num_of_paging_blk *
+			(sizeof(*dump_data) +
+			 sizeof(struct iwl_fw_error_dump_paging) +
+			 PAGING_BLOCK_SIZE);
+
 	/* If we only want a monitor dump, reset the file length */
 	if (monitor_dump_only) {
 		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
@@ -1302,6 +1323,26 @@
 					 dump_mem->data, IWL8260_ICCM_LEN);
 	}
 
+	/* Dump fw's virtual image */
+	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+		u32 i;
+
+		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
+			struct iwl_fw_error_dump_paging *paging;
+			struct page *pages =
+				mvm->fw_paging_db[i].fw_paging_block;
+
+			dump_data = iwl_fw_error_next_data(dump_data);
+			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+			dump_data->len = cpu_to_le32(sizeof(*paging) +
+						     PAGING_BLOCK_SIZE);
+			paging = (void *)dump_data->data;
+			paging->index = cpu_to_le32(i);
+			memcpy(paging->data, page_address(pages),
+			       PAGING_BLOCK_SIZE);
+		}
+	}
+
 dump_trans_data:
 	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
 						       mvm->fw_dump_trig);
@@ -1577,20 +1618,6 @@
 	return NULL;
 }
 
-static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif, s8 tx_power)
-{
-	/* FW is in charge of regulatory enforcement */
-	struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
-		.mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
-		.pwr_restriction = cpu_to_le16(tx_power),
-	};
-
-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
-				    sizeof(reduce_txpwr_cmd),
-				    &reduce_txpwr_cmd);
-}
-
 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				s16 tx_power)
 {
@@ -1602,9 +1629,6 @@
 	};
 	int len = sizeof(cmd);
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
-		return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
-
 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
 		cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
@@ -1771,7 +1795,7 @@
 		 * Flush them here.
 		 */
 		mutex_lock(&mvm->mutex);
-		iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
 		mutex_unlock(&mvm->mutex);
 
 		/*
@@ -1972,6 +1996,27 @@
 	*total_flags = 0;
 }
 
+static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					unsigned int filter_flags,
+					unsigned int changed_flags)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We only support filtering of probe requests */
+	if (!(changed_flags & FIF_PROBE_REQ))
+		return;
+
+	/* Supported only for p2p client interfaces */
+	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+	    !vif->p2p)
+		return;
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	mutex_unlock(&mvm->mutex);
+}
+
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 struct iwl_bcast_iter_data {
 	struct iwl_mvm *mvm;
@@ -2319,6 +2364,8 @@
 	if (vif->type == NL80211_IFTYPE_AP)
 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
 
+	mvmvif->ap_assoc_sta_count = 0;
+
 	/* Add the mac context */
 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
 	if (ret)
@@ -2388,6 +2435,7 @@
 		iwl_mvm_remove_time_event(mvm, mvmvif,
 					  &mvmvif->time_event_data);
 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
+		mvmvif->csa_countdown = false;
 	}
 
 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
@@ -2613,6 +2661,7 @@
 				       struct ieee80211_sta *sta)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 
 	/*
@@ -2627,6 +2676,12 @@
 	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
 				   ERR_PTR(-ENOENT));
+
+	if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
+		mvmvif->ap_assoc_sta_count--;
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	}
+
 	mutex_unlock(&mvm->mutex);
 }
 
@@ -3905,7 +3960,7 @@
 	}
 
 	if (drop) {
-		if (iwl_mvm_flush_tx_path(mvm, msk, true))
+		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
 			IWL_ERR(mvm, "flush request fail\n");
 		mutex_unlock(&mvm->mutex);
 	} else {
@@ -4142,6 +4197,7 @@
 	.config = iwl_mvm_mac_config,
 	.prepare_multicast = iwl_mvm_prepare_multicast,
 	.configure_filter = iwl_mvm_configure_filter,
+	.config_iface_filter = iwl_mvm_config_iface_filter,
 	.bss_info_changed = iwl_mvm_bss_info_changed,
 	.hw_scan = iwl_mvm_mac_hw_scan,
 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
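
Since the firmware stores the per-plan iteration count in a u8 and
treats 0xff as "run forever", only 254 finite iterations can be
expressed, which is what the wiphy limit above advertises. A sketch of
the encoding, assuming a zero request means infinite at the API level:

#define EXAMPLE_FW_ITER_INFINITE 0xff

static u8 example_encode_iterations(u32 requested)
{
	/* 0 = infinite at the API level; anything above the advertised
	 * maximum of 254 also falls back to the firmware's marker
	 */
	if (!requested || requested > 254)
		return EXAMPLE_FW_ITER_INFINITE;
	return requested;
}
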
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b95a07e..c6327cd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -82,7 +82,6 @@
 #include "constants.h"
 #include "tof.h"
 
-#define IWL_INVALID_MAC80211_QUEUE	0xff
 #define IWL_MVM_MAX_ADDRESSES		5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
@@ -323,11 +322,11 @@
 struct iwl_mvm_vif_bf_data {
 	bool bf_enabled;
 	bool ba_enabled;
-	s8 ave_beacon_signal;
-	s8 last_cqm_event;
-	s8 bt_coex_min_thold;
-	s8 bt_coex_max_thold;
-	s8 last_bt_coex_event;
+	int ave_beacon_signal;
+	int last_cqm_event;
+	int bt_coex_min_thold;
+	int bt_coex_max_thold;
+	int last_bt_coex_event;
 };
 
 /**
@@ -338,6 +337,8 @@
  * @bssid: BSSID for this (client) interface
  * @associated: indicates that we're currently associated, used only for
  *	managing the firmware state in iwl_mvm_bss_info_changed_station()
+ * @ap_assoc_sta_count: count of stations associated with us - valid only
+ *	if VIF type is AP
  * @uploaded: indicates the MAC context has been added to the device
  * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
  *	should get quota etc.
@@ -367,6 +368,7 @@
 
 	u8 bssid[ETH_ALEN];
 	bool associated;
+	u8 ap_assoc_sta_count;
 
 	bool uploaded;
 	bool ap_ibss_active;
@@ -602,7 +604,14 @@
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
-	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+	struct {
+		/* Bitmap of mac80211 queues mapped to this HW queue */
+		u32 hw_queue_to_mac80211;
+		u8 hw_queue_refcount;
+		bool setup_reserved;
+		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+	} queue_info[IWL_MAX_HW_QUEUES];
+	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
 	const char *nvm_file_name;
@@ -679,6 +688,7 @@
 	struct debugfs_blob_wrapper nvm_sw_blob;
 	struct debugfs_blob_wrapper nvm_calib_blob;
 	struct debugfs_blob_wrapper nvm_prod_blob;
+	struct debugfs_blob_wrapper nvm_phy_sku_blob;
 
 	struct iwl_mvm_frame_stats drv_rx_stats;
 	spinlock_t drv_stats_lock;
@@ -860,6 +870,11 @@
 	       test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
 }
 
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
 /* Must be called with rcu_read_lock() held and it can only be
  * released when mvmsta is not needed anymore.
  */
@@ -907,6 +922,12 @@
 			   IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
+static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -934,11 +955,6 @@
 			   IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
 }
 
-static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
-{
-	return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
-}
-
 static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
 {
 	return fw_has_capa(&mvm->fw->ucode_capa,
@@ -959,6 +975,12 @@
 			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
 }
 
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+	/* firmware flag isn't defined yet */
+	return false;
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -1014,7 +1036,7 @@
 #else
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
 static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
@@ -1131,7 +1153,6 @@
 				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *exclude_vif);
-
 /* Bindings */
 int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1344,14 +1365,20 @@
 }
 
 /* hw scheduler queue config */
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-			const struct iwl_trans_txq_scd_cfg *cfg,
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout);
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
+/*
+ * Disable a TXQ.
+ * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
+ */
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			 u8 tid, u8 flags);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
 
 static inline
-void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
-			   u8 fifo, unsigned int wdg_timeout)
+void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			   u8 fifo, u16 ssn, unsigned int wdg_timeout)
 {
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.fifo = fifo,
@@ -1360,13 +1387,13 @@
 		.frame_limit = IWL_FRAME_LIMIT,
 	};
 
-	iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
+	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
-					  int fifo, int sta_id, int tid,
-					  int frame_limit, u16 ssn,
-					  unsigned int wdg_timeout)
+					  int mac80211_queue, int fifo,
+					  int sta_id, int tid, int frame_limit,
+					  u16 ssn, unsigned int wdg_timeout)
 {
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.fifo = fifo,
@@ -1376,7 +1403,7 @@
 		.aggregate = true,
 	};
 
-	iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
+	iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
 }
 
 /* Thermal management and CT-kill */
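
mvm.h now declares iwl_mvm_find_free_queue() alongside the refcounted
queue_info[] table. Based only on the fields declared above, a plausible
sketch of such a lookup (the real implementation may differ):

int example_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
{
	int i, ret = -ENOSPC;

	spin_lock_bh(&mvm->queue_info_lock);
	for (i = minq; i <= maxq; i++) {
		/* a queue is free if nothing holds a reference and it
		 * hasn't been reserved during setup
		 */
		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
		    !mvm->queue_info[i].setup_reserved) {
			ret = i;
			break;
		}
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	return ret;
}
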
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 328187d..2ee0f6f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -316,7 +316,8 @@
 	return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
 				  regulatory, mac_override, phy_sku,
 				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-				  lar_enabled, mac_addr0, mac_addr1);
+				  lar_enabled, mac_addr0, mac_addr1,
+				  mvm->trans->hw_id);
 }
 
 #define MAX_NVM_FILE_LEN	16384
@@ -482,6 +483,7 @@
 			ret = -ENOMEM;
 			break;
 		}
+		kfree(mvm->nvm_sections[section_id].data);
 		mvm->nvm_sections[section_id].data = temp;
 		mvm->nvm_sections[section_id].length = section_size;
 
@@ -563,6 +565,10 @@
 				mvm->nvm_prod_blob.data = temp;
 				mvm->nvm_prod_blob.size  = ret;
 				break;
+			case NVM_SECTION_TYPE_PHY_SKU:
+				mvm->nvm_phy_sku_blob.data = temp;
+				mvm->nvm_phy_sku_blob.size  = ret;
+				break;
 			default:
 				if (section == mvm->cfg->nvm_hw_section_num) {
 					mvm->nvm_hw_blob.data = temp;
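
The added kfree() covers an external NVM file that carries the same
section id twice: without it, the first allocation would leak when the
pointer is overwritten. The idiom in isolation, with the section layout
assumed from its use here:

static void example_replace_section(struct iwl_nvm_section *sec,
				    const u8 *data, u16 len)
{
	kfree(sec->data);	/* kfree(NULL) is a no-op, so the first
				 * occurrence of a section is safe too
				 */
	sec->data = data;
	sec->length = len;
}
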
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a37de3f..13c97f6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -89,6 +89,7 @@
 MODULE_LICENSE("GPL");
 
 static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
 
 struct iwl_mvm_mod_params iwlmvm_mod_params = {
 	.power_scheme = IWL_POWER_SCHEME_BPS,
@@ -222,7 +223,6 @@
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-	RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
 
@@ -257,6 +257,8 @@
 	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
 		   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
 	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
+	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+		       iwl_mvm_temp_notif, true),
 
 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
 		   true),
@@ -271,6 +273,7 @@
 static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
 	CMD(MVM_ALIVE),
 	CMD(REPLY_ERROR),
+	CMD(ECHO_CMD),
 	CMD(INIT_COMPLETE_NOTIF),
 	CMD(PHY_CONTEXT_CMD),
 	CMD(MGMT_MCAST_KEY),
@@ -422,7 +425,6 @@
 		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
 
 	op_mode = hw->priv;
-	op_mode->ops = &iwl_mvm_ops;
 
 	mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	mvm->dev = trans->dev;
@@ -431,6 +433,15 @@
 	mvm->fw = fw;
 	mvm->hw = hw;
 
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		op_mode->ops = &iwl_mvm_ops_mq;
+	} else {
+		op_mode->ops = &iwl_mvm_ops;
+
+		if (WARN_ON(trans->num_rx_queues > 1))
+			goto out_free;
+	}
+
 	mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
 	mvm->aux_queue = 15;
@@ -451,6 +462,7 @@
 	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
 	INIT_LIST_HEAD(&mvm->async_handlers_list);
 	spin_lock_init(&mvm->time_event_lock);
+	spin_lock_init(&mvm->queue_info_lock);
 
 	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
 	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -590,6 +602,7 @@
 	ieee80211_unregister_hw(mvm->hw);
 	iwl_mvm_leds_exit(mvm);
  out_free:
+	flush_delayed_work(&mvm->fw_dump_wk);
 	iwl_phy_db_free(mvm->phy_db);
 	kfree(mvm->scan_cmd);
 	if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
@@ -617,6 +630,7 @@
 	kfree(mvm->d3_resume_sram);
 	if (mvm->nd_config) {
 		kfree(mvm->nd_config->match_sets);
+		kfree(mvm->nd_config->scan_plans);
 		kfree(mvm->nd_config);
 		mvm->nd_config = NULL;
 	}
@@ -716,18 +730,11 @@
 	}
 }
 
-static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-				struct napi_struct *napi,
-				struct iwl_rx_cmd_buffer *rxb)
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      struct iwl_rx_packet *pkt)
 {
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	u8 i;
-
-	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
-		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
-		return;
-	}
+	int i;
 
 	iwl_mvm_rx_check_trigger(mvm, pkt);
 
@@ -767,40 +774,84 @@
 	}
 }
 
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+		       struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+			  struct napi_struct *napi,
+			  struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+	else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
+		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) already stopped\n",
-				    queue, mq);
-		return;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) already stopped\n",
+					    queue, q);
+			continue;
+		}
+
+		ieee80211_stop_queue(mvm->hw, q);
 	}
-
-	ieee80211_stop_queue(mvm->hw, mq);
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	int mq = mvm->queue_to_mac80211[queue];
+	unsigned long mq;
+	int q;
 
-	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON_ONCE(!mq))
 		return;
 
-	if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
-		IWL_DEBUG_TX_QUEUES(mvm,
-				    "queue %d (mac80211 %d) still stopped\n",
-				    queue, mq);
-		return;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "queue %d (mac80211 %d) still stopped\n",
+					    queue, q);
+			continue;
+		}
+
+		ieee80211_wake_queue(mvm->hw, q);
 	}
-
-	ieee80211_wake_queue(mvm->hw, mq);
 }
 
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1145,12 +1196,17 @@
 	/* make sure we have no running tx while configuring the seqno */
 	synchronize_net();
 
-	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
-	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
-				   sizeof(wowlan_config_cmd),
-				   &wowlan_config_cmd);
-	if (ret)
-		return ret;
+	/* send the wowlan configuration only if needed */
+	if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
+					&d0i3_iter_data);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+					   sizeof(wowlan_config_cmd),
+					   &wowlan_config_cmd);
+		if (ret)
+			return ret;
+	}
 
 	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
 				    flags | CMD_MAKE_TRANS_IDLE,
@@ -1257,7 +1313,7 @@
 	};
 	struct iwl_wowlan_status *status;
 	int ret;
-	u32 handled_reasons, wakeup_reasons;
+	u32 handled_reasons, wakeup_reasons = 0;
 	__le16 *qos_seq = NULL;
 
 	mutex_lock(&mvm->mutex);
@@ -1289,6 +1345,9 @@
 out:
 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
+	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
+		       wakeup_reasons);
+
 	/* qos_seq might point inside resp_pkt, so free it only now */
 	if (get_status_cmd.resp_pkt)
 		iwl_free_resp(&get_status_cmd);
@@ -1338,17 +1397,38 @@
 	return _iwl_mvm_exit_d0i3(mvm);
 }
 
+#define IWL_MVM_COMMON_OPS					\
+	/* these could be differentiated */			\
+	.queue_full = iwl_mvm_stop_sw_queue,			\
+	.queue_not_full = iwl_mvm_wake_sw_queue,		\
+	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
+	.free_skb = iwl_mvm_free_skb,				\
+	.nic_error = iwl_mvm_nic_error,				\
+	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
+	.nic_config = iwl_mvm_nic_config,			\
+	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
+	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
+	/* as we only register one, these MUST be common! */	\
+	.start = iwl_op_mode_mvm_start,				\
+	.stop = iwl_op_mode_mvm_stop
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
-	.start = iwl_op_mode_mvm_start,
-	.stop = iwl_op_mode_mvm_stop,
-	.rx = iwl_mvm_rx_dispatch,
-	.queue_full = iwl_mvm_stop_sw_queue,
-	.queue_not_full = iwl_mvm_wake_sw_queue,
-	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
-	.free_skb = iwl_mvm_free_skb,
-	.nic_error = iwl_mvm_nic_error,
-	.cmd_queue_full = iwl_mvm_cmd_queue_full,
-	.nic_config = iwl_mvm_nic_config,
-	.enter_d0i3 = iwl_mvm_enter_d0i3,
-	.exit_d0i3 = iwl_mvm_exit_d0i3,
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+			      struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      unsigned int queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx_mq,
+	.rx_rss = iwl_mvm_rx_mq_rss,
 };
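
With hw_queue_to_mac80211 now a bitmap, a single hardware queue can back
several mac80211 queues, which is why the stop/wake paths above iterate
with for_each_set_bit(). A condensed, illustrative walk of the mapping:

static void example_walk_mapping(struct iwl_mvm *mvm, int hw_queue)
{
	unsigned long mq;
	int q;

	/* snapshot the bitmap under the lock, walk it outside */
	spin_lock_bh(&mvm->queue_info_lock);
	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
	spin_unlock_bh(&mvm->queue_info_lock);

	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES)
		IWL_DEBUG_TX_QUEUES(mvm,
				    "hw queue %d backs mac80211 queue %d\n",
				    hw_queue, q);
}
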
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 4645877..bed9696 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -306,13 +308,51 @@
 	return radar_detect;
 }
 
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct iwl_mac_power_cmd *cmd,
+					   bool host_awake)
+{
+	int dtimper = vif->bss_conf.dtim_period ?: 1;
+	int skip;
+
+	/* disable, in case we're supposed to override */
+	cmd->skip_dtim_periods = 0;
+	cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+	if (iwl_mvm_power_is_radar(vif))
+		return;
+
+	if (dtimper >= 10)
+		return;
+
+	/* TODO: check that multicast wake lock is off */
+
+	if (host_awake) {
+		if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+			return;
+		skip = 2;
+	} else {
+		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+
+		if (WARN_ON(!dtimper_tu))
+			return;
+		/* configure skip over dtim up to 306 TU (~314 msec) */
+		skip = max_t(u8, 1, 306 / dtimper_tu);
+	}
+
+	/* the firmware really expects "look at every X DTIMs", so add 1 */
+	cmd->skip_dtim_periods = 1 + skip;
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
 static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif,
-				    struct iwl_mac_power_cmd *cmd)
+				    struct iwl_mac_power_cmd *cmd,
+				    bool host_awake)
 {
 	int dtimper, bi;
 	int keep_alive;
-	bool radar_detect = false;
 	struct iwl_mvm_vif *mvmvif __maybe_unused =
 		iwl_mvm_vif_from_mac80211(vif);
 
@@ -337,8 +377,13 @@
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-	if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
-	    (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+		return;
+
+	if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+	    (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+	     !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -350,27 +395,25 @@
 		cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
 	}
 
-	/* Check if radar detection is required on current channel */
-	radar_detect = iwl_mvm_power_is_radar(vif);
+	iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
 
-	/* Check skip over DTIM conditions */
-	if (!radar_detect && (dtimper < 10) &&
-	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
-	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-		cmd->skip_dtim_periods = 3;
-	}
-
-	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
-		cmd->rx_data_timeout =
-			cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
-		cmd->tx_data_timeout =
-			cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
-	} else {
+	if (!host_awake) {
 		cmd->rx_data_timeout =
 			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
 		cmd->tx_data_timeout =
 			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+	} else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+		   fw_has_capa(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+	} else {
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
 	}
 
 	if (iwl_mvm_power_allow_uapsd(mvm, vif))
@@ -427,7 +470,8 @@
 {
 	struct iwl_mac_power_cmd cmd = {};
 
-	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd,
+				mvm->cur_ucode != IWL_UCODE_WOWLAN);
 	iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
@@ -440,14 +484,14 @@
 int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 {
 	struct iwl_device_power_cmd cmd = {
-		.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+		.flags = 0,
 	};
 
 	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
 		mvm->ps_disabled = true;
 
-	if (mvm->ps_disabled)
-		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+	if (!mvm->ps_disabled)
+		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
@@ -963,25 +1007,8 @@
 	if (!vif->bss_conf.assoc)
 		return 0;
 
-	iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-	if (enable) {
-		/* configure skip over dtim up to 306TU - 314 msec */
-		int dtimper = vif->bss_conf.dtim_period ?: 1;
-		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
-		bool radar_detect = iwl_mvm_power_is_radar(vif);
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
 
-		if (WARN_ON(!dtimper_tu))
-			return 0;
-
-		/* Check skip over DTIM conditions */
-		/* TODO: check that multicast wake lock is off */
-		if (!radar_detect && (dtimper < 10)) {
-			cmd.skip_dtim_periods = 306 / dtimper_tu;
-			if (cmd.skip_dtim_periods)
-				cmd.flags |= cpu_to_le16(
-					POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-		}
-	}
 	iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
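
To make the skip-over-DTIM arithmetic above concrete: with
dtim_period = 2 and beacon_int = 100 TU, dtimper_tu = 200, so
skip = max(1, 306/200) = 1 and skip_dtim_periods = 2, i.e. the firmware
looks at every second DTIM. As a sketch:

static u8 example_skip_dtim_periods(int dtim_period, int beacon_int)
{
	int dtimper_tu = dtim_period * beacon_int;	/* 200 TU */
	u8 skip = max_t(u8, 1, 306 / dtimper_tu);	/* = 1 */

	/* firmware counts "look at every X DTIMs", hence the +1 -> 2 */
	return 1 + skip;
}
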
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 5ae9c8a..d1ad103 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -177,9 +177,6 @@
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-	if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
-	    iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
-		return false;
 
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
 		return false;
@@ -524,14 +521,56 @@
 	return lq_types[type];
 }
 
+static char *rs_pretty_rate(const struct rs_rate *rate)
+{
+	static char buf[40];
+	static const char * const legacy_rates[] = {
+		[IWL_RATE_1M_INDEX] = "1M",
+		[IWL_RATE_2M_INDEX] = "2M",
+		[IWL_RATE_5M_INDEX] = "5.5M",
+		[IWL_RATE_11M_INDEX] = "11M",
+		[IWL_RATE_6M_INDEX] = "6M",
+		[IWL_RATE_9M_INDEX] = "9M",
+		[IWL_RATE_12M_INDEX] = "12M",
+		[IWL_RATE_18M_INDEX] = "18M",
+		[IWL_RATE_24M_INDEX] = "24M",
+		[IWL_RATE_36M_INDEX] = "36M",
+		[IWL_RATE_48M_INDEX] = "48M",
+		[IWL_RATE_54M_INDEX] = "54M",
+	};
+	static const char *const ht_vht_rates[] = {
+		[IWL_RATE_MCS_0_INDEX] = "MCS0",
+		[IWL_RATE_MCS_1_INDEX] = "MCS1",
+		[IWL_RATE_MCS_2_INDEX] = "MCS2",
+		[IWL_RATE_MCS_3_INDEX] = "MCS3",
+		[IWL_RATE_MCS_4_INDEX] = "MCS4",
+		[IWL_RATE_MCS_5_INDEX] = "MCS5",
+		[IWL_RATE_MCS_6_INDEX] = "MCS6",
+		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+	};
+	const char *rate_str;
+
+	if (is_type_legacy(rate->type))
+		rate_str = legacy_rates[rate->index];
+	else if (is_type_ht(rate->type) || is_type_vht(rate->type))
+		rate_str = ht_vht_rates[rate->index];
+	else
+		rate_str = "BAD_RATE";
+
+	sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
+		rs_pretty_ant(rate->ant), rate_str);
+	return buf;
+}
+
 static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
 				const char *prefix)
 {
 	IWL_DEBUG_RATE(mvm,
-		       "%s: (%s: %d) ANT: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
-		       prefix, rs_pretty_lq_type(rate->type),
-		       rate->index, rs_pretty_ant(rate->ant),
-		       rate->bw, rate->sgi, rate->ldpc, rate->stbc);
+		       "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
+		       prefix, rs_pretty_rate(rate), rate->bw,
+		       rate->sgi, rate->ldpc, rate->stbc);
 }
 
 static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
@@ -562,8 +601,8 @@
 }
 
 static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
-				      struct iwl_lq_sta *lq_data, u8 tid,
-				      struct ieee80211_sta *sta)
+				     struct iwl_lq_sta *lq_data, u8 tid,
+				     struct ieee80211_sta *sta)
 {
 	int ret = -EAGAIN;
 
@@ -1485,7 +1524,7 @@
 	u32 target_tpt;
 	int rate_idx;
 
-	if (success_ratio > IWL_MVM_RS_SR_NO_DECREASE) {
+	if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
 		target_tpt = 100 * expected_current_tpt;
 		IWL_DEBUG_RATE(mvm,
 			       "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
@@ -1493,7 +1532,7 @@
 	} else {
 		target_tpt = lq_sta->last_tpt;
 		IWL_DEBUG_RATE(mvm,
-			       "SR %d not thag good. Find rate exceeding ACTUAL_TPT %d\n",
+			       "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
 			       success_ratio, target_tpt);
 	}
 
@@ -1622,6 +1661,51 @@
 	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
 }
 
+static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+			      struct ieee80211_sta *sta,
+			      struct iwl_lq_sta *lq_sta,
+			      struct iwl_scale_tbl_info *tbl,
+			      enum rs_action scale_action)
+{
+	if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+		return false;
+
+	if (!is_vht_siso(&tbl->rate))
+		return false;
+
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
+	    (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
+	    (scale_action == RS_ACTION_DOWNSCALE)) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
+		tbl->rate.index = IWL_RATE_MCS_4_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
+		goto tweaked;
+	}
+
+	/* Go back to 80MHz MCS1 only once we've established that 20MHz MCS5
+	 * is sustainable, i.e. we're past the test window. We can't go back
+	 * while MCS5 is still being tested, and it always is right after
+	 * switching to 20MHz MCS4, because the rate stats are cleared then.
+	 */
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
+	    (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
+	     (scale_action == RS_ACTION_STAY)) ||
+	     ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
+	      (scale_action == RS_ACTION_UPSCALE)))) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
+		tbl->rate.index = IWL_RATE_MCS_1_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
+		goto tweaked;
+	}
+
+	return false;
+
+tweaked:
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rs_rate_scale_clear_tbl_windows(mvm, tbl);
+	return true;
+}
+
 static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 					 struct iwl_lq_sta *lq_sta,
 					 struct ieee80211_sta *sta,
@@ -2174,9 +2258,9 @@
 	if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
 	    (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
 		IWL_DEBUG_RATE(mvm,
-			       "(%s: %d): Test Window: succ %d total %d\n",
-			       rs_pretty_lq_type(rate->type),
-			       index, window->success_counter, window->counter);
+			       "%s: Test Window: succ %d total %d\n",
+			       rs_pretty_rate(rate),
+			       window->success_counter, window->counter);
 
 		/* Can't calculate this yet; not enough history */
 		window->average_tpt = IWL_INVALID_VALUE;
@@ -2253,8 +2337,8 @@
 		high_tpt = tbl->win[high].average_tpt;
 
 	IWL_DEBUG_RATE(mvm,
-		       "(%s: %d): cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
-		       rs_pretty_lq_type(rate->type), index, current_tpt, sr,
+		       "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+		       rs_pretty_rate(rate), current_tpt, sr,
 		       low, high, low_tpt, high_tpt);
 
 	scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
@@ -2305,6 +2389,8 @@
 	/* Replace uCode's rate table for the destination station. */
 	if (update_lq) {
 		tbl->rate.index = index;
+		if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
+			rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
 		rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
 	}
 
@@ -2542,7 +2628,6 @@
 		}
 	}
 
-	rs_dump_rate(mvm, rate, "OPTIMAL RATE");
 	return rate;
 }
 
@@ -2983,9 +3068,6 @@
 	else
 		rs_vht_init(mvm, sta, lq_sta, vht_cap);
 
-	if (IWL_MVM_RS_DISABLE_P2P_MIMO && sta_priv->vif->p2p)
-		lq_sta->active_mimo2_rate = 0;
-
 	lq_sta->max_legacy_rate_idx =
 		rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
 	lq_sta->max_siso_rate_idx =
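
A note on the rs_pretty_rate() helper added above: it formats into a static
buffer, which is acceptable for the serialized debug paths it serves but is
not reentrant. A minimal reentrant variant -- a hypothetical sketch, not part
of this patch -- would take a caller-supplied buffer instead:

	/* Hypothetical reentrant variant: the caller owns the buffer, so
	 * two concurrent callers cannot corrupt each other's output. The
	 * rate index is printed numerically to keep the sketch short.
	 */
	static const char *rs_pretty_rate_buf(const struct rs_rate *rate,
					      char *buf, size_t len)
	{
		snprintf(buf, len, "(%s|%s|%d)", rs_pretty_lq_type(rate->type),
			 rs_pretty_ant(rate->ant), rate->index);
		return buf;
	}
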
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index c37c10a..5b58f53 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -202,7 +202,6 @@
 			return -1;
 
 		stats->flag |= RX_FLAG_DECRYPTED;
-		IWL_DEBUG_WEP(mvm, "hw decrypted CCMP successfully\n");
 		*crypt_len = IEEE80211_CCMP_HDR_LEN;
 		return 0;
 
@@ -299,13 +298,6 @@
 		return;
 	}
 
-	if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
-			       phy_info->cfg_phy_cnt);
-		kfree_skb(skb);
-		return;
-	}
-
 	/*
 	 * Keep packets with CRC errors (and with overrun) for monitor mode
 	 * (otherwise the firmware discards them) but mark them as bad.
@@ -354,8 +346,8 @@
 	/* This is fine since we don't support multiple AP interfaces */
 	sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
 	if (sta) {
-		struct iwl_mvm_sta *mvmsta;
-		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
 		rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
 
 		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
@@ -459,7 +451,7 @@
 struct iwl_mvm_stat_data {
 	struct iwl_mvm *mvm;
 	__le32 mac_id;
-	__s8 beacon_filter_average_energy;
+	u8 beacon_filter_average_energy;
 	struct mvm_statistics_general_v8 *general;
 };
 
@@ -577,56 +569,33 @@
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 				  struct iwl_rx_packet *pkt)
 {
-	size_t v8_len = sizeof(struct iwl_notif_statistics_v8);
-	size_t v10_len = sizeof(struct iwl_notif_statistics_v10);
+	struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
 	struct iwl_mvm_stat_data data = {
 		.mvm = mvm,
 	};
 	u32 temperature;
 
-	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
-		struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
+	if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats))
+		goto invalid;
 
-		if (iwl_rx_packet_payload_len(pkt) != v10_len)
-			goto invalid;
+	temperature = le32_to_cpu(stats->general.radio_temperature);
+	data.mac_id = stats->rx.general.mac_id;
+	data.beacon_filter_average_energy =
+		stats->general.beacon_filter_average_energy;
 
-		temperature = le32_to_cpu(stats->general.radio_temperature);
-		data.mac_id = stats->rx.general.mac_id;
-		data.beacon_filter_average_energy =
-			stats->general.beacon_filter_average_energy;
+	iwl_mvm_update_rx_statistics(mvm, &stats->rx);
 
-		iwl_mvm_update_rx_statistics(mvm, &stats->rx);
+	mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
+	mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+	mvm->radio_stats.on_time_rf =
+		le64_to_cpu(stats->general.on_time_rf);
+	mvm->radio_stats.on_time_scan =
+		le64_to_cpu(stats->general.on_time_scan);
 
-		mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
-		mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
-		mvm->radio_stats.on_time_rf =
-			le64_to_cpu(stats->general.on_time_rf);
-		mvm->radio_stats.on_time_scan =
-			le64_to_cpu(stats->general.on_time_scan);
-
-		data.general = &stats->general;
-	} else {
-		struct iwl_notif_statistics_v8 *stats = (void *)&pkt->data;
-
-		if (iwl_rx_packet_payload_len(pkt) != v8_len)
-			goto invalid;
-
-		temperature = le32_to_cpu(stats->general.radio_temperature);
-		data.mac_id = stats->rx.general.mac_id;
-		data.beacon_filter_average_energy =
-			stats->general.beacon_filter_average_energy;
-
-		iwl_mvm_update_rx_statistics(mvm, &stats->rx);
-	}
+	data.general = &stats->general;
 
 	iwl_mvm_rx_stats_check_trigger(mvm, pkt);
 
-	/* Only handle rx statistics temperature changes if async temp
-	 * notifications are not supported
-	 */
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
-		iwl_mvm_tt_temp_changed(mvm, temperature);
-
 	ieee80211_iterate_active_interfaces(mvm->hw,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_stat_iterator,
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 56559d4..d6e0c1b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -131,7 +131,6 @@
 	int n_ssids;
 	struct cfg80211_ssid *ssids;
 	struct ieee80211_channel **channels;
-	u16 interval; /* interval between scans (in secs) */
 	u32 flags;
 	u8 *mac_addr;
 	u8 *mac_addr_mask;
@@ -140,7 +139,8 @@
 	int n_match_sets;
 	struct iwl_scan_probe_req preq;
 	struct cfg80211_match_set *match_sets;
-	u8 iterations[2];
+	int n_scan_plans;
+	struct cfg80211_sched_scan_plan *scan_plans;
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -474,7 +474,7 @@
 	int ret;
 
 	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
-			return -EIO;
+		return -EIO;
 
 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
 		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
@@ -737,8 +737,7 @@
 }
 
 static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
-					struct ieee80211_vif *vif,
-					int n_iterations)
+					struct ieee80211_vif *vif)
 {
 	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 
@@ -750,16 +749,9 @@
 	 */
 	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
 		mvm->last_ebs_successful &&
-		(n_iterations > 1 ||
-		 fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
 		vif->type != NL80211_IFTYPE_P2P_DEVICE);
 }
 
-static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
-{
-	return params->iterations[0] + params->iterations[1];
-}
-
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 				   struct iwl_mvm_scan_params *params)
 {
@@ -798,12 +790,15 @@
 		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
 			 mvm->fw->ucode_capa.n_scan_channels);
 	u32 ssid_bitmap = 0;
-	int n_iterations = iwl_mvm_scan_total_iterations(params);
+	int i;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	memset(cmd, 0, ksize(cmd));
 
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
 	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
 
 	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
@@ -823,14 +818,26 @@
 	/* this API uses bits 1-20 instead of 0-19 */
 	ssid_bitmap <<= 1;
 
-	cmd->schedule[0].delay = cpu_to_le16(params->interval);
-	cmd->schedule[0].iterations = params->iterations[0];
-	cmd->schedule[0].full_scan_mul = 1;
-	cmd->schedule[1].delay = cpu_to_le16(params->interval);
-	cmd->schedule[1].iterations = params->iterations[1];
-	cmd->schedule[1].full_scan_mul = 1;
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
 
-	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
+		cmd->schedule[i].delay =
+			cpu_to_le16(scan_plan->interval);
+		cmd->schedule[i].iterations = scan_plan->iterations;
+		cmd->schedule[i].full_scan_mul = 1;
+	}
+
+	/*
+	 * A last scan plan with an iteration count of zero should run
+	 * infinitely; map it to 0xff, the maximum the firmware accepts.
+	 * The last plan is not always infinite, though: a regular scan,
+	 * for example, uses a single plan with exactly one iteration.
+	 */
+	if (!cmd->schedule[i - 1].iterations)
+		cmd->schedule[i - 1].iterations = 0xff;
+
+	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
 		cmd->channel_opt[0].flags =
 			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -894,7 +901,6 @@
 
 int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 {
-
 	struct iwl_scan_config *scan_config;
 	struct ieee80211_supported_band *band;
 	int num_channels =
@@ -970,6 +976,12 @@
 	return -ENOENT;
 }
 
+static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
+{
+	return params->n_scan_plans == 1 &&
+		params->scan_plans[0].iterations == 1;
+}
+
 static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
@@ -982,7 +994,7 @@
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-	if (iwl_mvm_scan_total_iterations(params) == 1)
+	if (iwl_mvm_is_regular_scan(params))
 		cmd->ooc_priority =
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	else
@@ -1029,7 +1041,7 @@
 	else
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
 
-	if (iwl_mvm_scan_total_iterations(params) > 1)
+	if (!iwl_mvm_is_regular_scan(params))
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1047,12 +1059,14 @@
 	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
 		sizeof(struct iwl_scan_channel_cfg_umac) *
 			mvm->fw->ucode_capa.n_scan_channels;
-	int uid;
+	int uid, i;
 	u32 ssid_bitmap = 0;
-	int n_iterations = iwl_mvm_scan_total_iterations(params);
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
 	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
 	if (uid < 0)
 		return uid;
@@ -1069,7 +1083,7 @@
 	if (type == IWL_MVM_SCAN_SCHED)
 		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
-	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
+	if (iwl_mvm_scan_use_ebs(mvm, vif))
 		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
 				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
@@ -1081,12 +1095,23 @@
 	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
 				       params->n_channels, ssid_bitmap, cmd);
 
-	/* With UMAC we use only one schedule for now, so use the sum
-	 * of the iterations (with a a maximum of 255).
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
+
+		sec_part->schedule[i].iter_count = scan_plan->iterations;
+		sec_part->schedule[i].interval =
+			cpu_to_le16(scan_plan->interval);
+	}
+
+	/*
+	 * A last scan plan with an iteration count of zero should run
+	 * infinitely; map it to 0xff, the maximum the firmware accepts.
+	 * The last plan is not always infinite, though: a regular scan,
+	 * for example, uses a single plan with exactly one iteration.
 	 */
-	sec_part->schedule[0].iter_count =
-		(n_iterations > 255) ? 255 : n_iterations;
-	sec_part->schedule[0].interval = cpu_to_le16(params->interval);
+	if (!sec_part->schedule[i - 1].iter_count)
+		sec_part->schedule[i - 1].iter_count = 0xff;
 
 	sec_part->delay = cpu_to_le16(params->delay);
 	sec_part->preq = params->preq;
@@ -1152,6 +1177,7 @@
 	};
 	struct iwl_mvm_scan_params params = {};
 	int ret;
+	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1164,8 +1190,6 @@
 	if (ret)
 		return ret;
 
-	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
 	/* we should have failed registration if scan_cmd was NULL */
 	if (WARN_ON(!mvm->scan_cmd))
 		return -ENOMEM;
@@ -1177,7 +1201,6 @@
 	params.flags = req->flags;
 	params.n_channels = req->n_channels;
 	params.delay = 0;
-	params.interval = 0;
 	params.ssids = req->ssids;
 	params.channels = req->channels;
 	params.mac_addr = req->mac_addr;
@@ -1187,8 +1210,8 @@
 	params.n_match_sets = 0;
 	params.match_sets = NULL;
 
-	params.iterations[0] = 1;
-	params.iterations[1] = 0;
+	params.scan_plans = &scan_plan;
+	params.n_scan_plans = 1;
 
 	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
@@ -1207,21 +1230,20 @@
 		return ret;
 
 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
-	if (!ret) {
-		IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-		mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
-	} else {
+	if (ret) {
 		/* If the scan failed, it usually means that the FW was unable
 		 * to allocate the time events. Warn on it, but maybe we
 		 * should try to send the command again with different params.
 		 */
 		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+		return ret;
 	}
 
-	if (ret)
-		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
-	return ret;
+	return 0;
 }
 
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
@@ -1267,20 +1289,14 @@
 	params.pass_all =  iwl_mvm_scan_pass_all(mvm, req);
 	params.n_match_sets = req->n_match_sets;
 	params.match_sets = req->match_sets;
+	if (!req->n_scan_plans)
+		return -EINVAL;
 
-	params.iterations[0] = 0;
-	params.iterations[1] = 0xff;
+	params.n_scan_plans = req->n_scan_plans;
+	params.scan_plans = req->scan_plans;
 
 	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
-	if (req->interval > U16_MAX) {
-		IWL_DEBUG_SCAN(mvm,
-			       "interval value is > 16-bits, set to max possible\n");
-		params.interval = U16_MAX;
-	} else {
-		params.interval = req->interval / MSEC_PER_SEC;
-	}
-
 	/* In theory, LMAC scans can handle a 32-bit delay, but since
 	 * waiting for over 18 hours to start the scan is a bit silly
 	 * and to keep it aligned with UMAC scans (which only support
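
The LMAC and UMAC paths above now share the same plan-to-schedule
translation, including the zero-means-infinite mapping. A self-contained
sketch of that logic, with simplified stand-ins for the firmware schedule
structures (kernel integer types assumed):

	/* Simplified model of the scan-plan translation; sched_entry
	 * stands in for the LMAC/UMAC schedule structures.
	 */
	struct plan { u16 interval; u8 iterations; };	/* 0 = infinite */
	struct sched_entry { u16 delay; u8 iterations; };

	static void fill_schedule(struct sched_entry *sched,
				  const struct plan *plans, int n_plans)
	{
		int i;

		for (i = 0; i < n_plans; i++) {
			sched[i].delay = plans[i].interval;
			sched[i].iterations = plans[i].iterations;
		}

		/* The firmware has no "run forever" encoding; 0xff is the
		 * closest approximation.
		 */
		if (!sched[n_plans - 1].iterations)
			sched[n_plans - 1].iterations = 0xff;
	}
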
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index df216cd..300a249 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -234,7 +234,9 @@
 	/* Found a place for all queues - enable them */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac],
-				      iwl_mvm_ac_to_tx_fifo[ac], wdg_timeout);
+				      mvmsta->hw_queue[ac],
+				      iwl_mvm_ac_to_tx_fifo[ac], 0,
+				      wdg_timeout);
 		mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]);
 	}
 
@@ -253,7 +255,7 @@
 	/* disable the TDLS STA-specific queues */
 	sta_msk = mvmsta->tfd_queue_msk;
 	for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE)
-		iwl_mvm_disable_txq(mvm, i, 0);
+		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
@@ -275,6 +277,11 @@
 	if (sta_id == IWL_MVM_STATION_COUNT)
 		return -ENOSPC;
 
+	if (vif->type == NL80211_IFTYPE_AP) {
+		mvmvif->ap_assoc_sta_count++;
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	}
+
 	spin_lock_init(&mvm_sta->lock);
 
 	mvm_sta->sta_id = sta_id;
@@ -287,7 +294,7 @@
 
 	/* HW restart, don't assume the memory has been zeroed */
 	atomic_set(&mvm->pending_frames[sta_id], 0);
-	mvm_sta->tid_disable_agg = 0;
+	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
 	mvm_sta->tfd_queue_msk = 0;
 
 	/* allocate new queues for a TDLS station */
@@ -467,7 +474,8 @@
 			unsigned long i, msk = mvm->tfd_drained[sta_id];
 
 			for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE)
-				iwl_mvm_disable_txq(mvm, i, 0);
+				iwl_mvm_disable_txq(mvm, i, i,
+						    IWL_MAX_TID_COUNT, 0);
 
 			mvm->tfd_drained[sta_id] = 0;
 			IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n",
@@ -494,7 +502,7 @@
 		if (ret)
 			return ret;
 		/* flush its queues here since we are freeing mvm_sta */
-		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 		if (ret)
 			return ret;
 		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
@@ -646,8 +654,8 @@
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Map Aux queue to fifo - needs to happen before adding Aux station */
-	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue,
-			      IWL_MVM_TX_FIFO_MCAST, wdg_timeout);
+	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 
 	/* Allocate aux station and assign to it the aux queue */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -918,6 +926,7 @@
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_tid_data *tid_data;
 	int txq_id;
+	int ret;
 
 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
 		return -EINVAL;
@@ -930,17 +939,6 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	for (txq_id = mvm->first_agg_queue;
-	     txq_id <= mvm->last_agg_queue; txq_id++)
-		if (mvm->queue_to_mac80211[txq_id] ==
-		    IWL_INVALID_MAC80211_QUEUE)
-			break;
-
-	if (txq_id > mvm->last_agg_queue) {
-		IWL_ERR(mvm, "Failed to allocate agg queue\n");
-		return -EIO;
-	}
-
 	spin_lock_bh(&mvmsta->lock);
 
 	/* possible race condition - we entered D0i3 while starting agg */
@@ -950,8 +948,18 @@
 		return -EIO;
 	}
 
-	/* the new tx queue is still connected to the same mac80211 queue */
-	mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+					 mvm->last_agg_queue);
+	if (txq_id < 0) {
+		ret = txq_id;
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Failed to allocate agg queue\n");
+		goto release_locks;
+	}
+	mvm->queue_info[txq_id].setup_reserved = true;
+	spin_unlock_bh(&mvm->queue_info_lock);
 
 	tid_data = &mvmsta->tid_data[tid];
 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -970,9 +978,12 @@
 		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
 	}
 
+	ret = 0;
+
+release_locks:
 	spin_unlock_bh(&mvmsta->lock);
 
-	return 0;
+	return ret;
 }
 
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -1000,13 +1011,19 @@
 
 	fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-	iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-			       buf_size, ssn, wdg_timeout);
+	iwl_mvm_enable_agg_txq(mvm, queue,
+			       vif->hw_queue[tid_to_mac80211_ac[tid]], fifo,
+			       mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
 
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
 		return -EIO;
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/*
 	 * Even though in theory the peer could have different
 	 * aggregation reorder buffer sizes for different sessions,
@@ -1051,6 +1068,11 @@
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
+	/* No need to mark as reserved anymore */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	switch (tid_data->state) {
 	case IWL_AGG_ON:
 		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
@@ -1068,14 +1090,15 @@
 
 		tid_data->ssn = 0xffff;
 		tid_data->state = IWL_AGG_OFF;
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 		spin_unlock_bh(&mvmsta->lock);
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, txq_id, 0);
+		iwl_mvm_disable_txq(mvm, txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1086,7 +1109,6 @@
 
 		/* No barriers since we are under mutex */
 		lockdep_assert_held(&mvm->mutex);
-		mvm->queue_to_mac80211[txq_id] = IWL_INVALID_MAC80211_QUEUE;
 
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		tid_data->state = IWL_AGG_OFF;
@@ -1127,9 +1149,14 @@
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[txq_id].setup_reserved = false;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
-		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
 			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 		iwl_trans_wait_tx_queue_empty(mvm->trans,
 					      mvmsta->tfd_queue_msk);
@@ -1137,12 +1164,11 @@
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id, 0);
+		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    0);
 	}
 
-	mvm->queue_to_mac80211[tid_data->txq_id] =
-				IWL_INVALID_MAC80211_QUEUE;
-
 	return 0;
 }
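
The aggregation-queue allocation above follows a reserve-then-commit
pattern: the queue is marked setup_reserved under queue_info_lock so no
concurrent ADDBA can claim it between iwl_mvm_find_free_queue() and the
actual enablement, and the flag is cleared once the queue is configured or
the session is torn down. Condensed (a summary of the code above, not a
separate implementation):

	spin_lock_bh(&mvm->queue_info_lock);
	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
					 mvm->last_agg_queue);
	if (txq_id >= 0)
		mvm->queue_info[txq_id].setup_reserved = true;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* ... later, once iwl_mvm_enable_agg_txq() has configured it ... */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[txq_id].setup_reserved = false;
	spin_unlock_bh(&mvm->queue_info_lock);
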
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index dbd7d54..7530eb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -129,7 +129,7 @@
 	 * issue as it will have to complete before the next command is
 	 * executed, and a new time event means a new command.
 	 */
-	iwl_mvm_flush_tx_path(mvm, queues, false);
+	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
index 380972f..4007f1d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tof.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.c
@@ -178,12 +178,14 @@
 	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
 		return -EINVAL;
 
-	if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+	    !mvmvif->ap_ibss_active) {
 		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
 		return -EIO;
 	}
 
 	cmd->sta_id = mvmvif->bcast_sta.sta_id;
+	memcpy(cmd->bssid, vif->addr, ETH_ALEN);
 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
 						    IWL_ALWAYS_LONG_GROUP, 0),
 				    0, sizeof(*cmd), cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
index 50ae8ad..9beebc3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tof.h
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.h
@@ -60,7 +60,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#ifndef __tof
+#ifndef __tof_h__
 #define __tof_h__
 
 #include "fw-api-tof.h"
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index fe7145c..cadfc04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -176,17 +176,34 @@
 	struct iwl_dts_measurement_cmd cmd = {
 		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
 	};
+	struct iwl_ext_dts_measurement_cmd extcmd = {
+		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
+	};
+	u32 cmdid;
 
-	return iwl_mvm_send_cmd_pdu(mvm, CMD_DTS_MEASUREMENT_TRIGGER, 0,
-				    sizeof(cmd), &cmd);
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+		cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+				   PHY_OPS_GROUP, 0);
+	else
+		cmdid = CMD_DTS_MEASUREMENT_TRIGGER;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
+		return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+
+	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
 }
 
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_temp_notif;
-	static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+	static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+					    DTS_MEASUREMENT_NOTIF_WIDE) };
 	int ret, temp;
 
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
+		temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
+
 	lockdep_assert_held(&mvm->mutex);
 
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
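
The temperature code is the first user of the wide command header here:
iwl_cmd_id() and WIDE_ID() pack a command group into the upper byte of the
command ID, which firmware without IWL_UCODE_TLV_API_WIDE_CMD_HDR does not
understand -- hence the fallback to the legacy one-byte opcode. Roughly (a
sketch of the encoding, not the driver's exact definition):

	/* Sketch of the wide command-ID encoding: group in bits 8-15,
	 * opcode in bits 0-7; legacy firmware only parses the low byte.
	 */
	static inline u32 wide_cmd_id(u8 group, u8 opcode)
	{
		return ((u32)group << 8) | opcode;
	}
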
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6df5aad..c652a66 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -560,15 +560,10 @@
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "Can continue DELBA flow ssn = next_recl = %d\n",
 				    tid_data->next_reclaimed);
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id, CMD_ASYNC);
+		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
+				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
+				    CMD_ASYNC);
 		tid_data->state = IWL_AGG_OFF;
-		/*
-		 * we can't hold the mutex - but since we are after a sequence
-		 * point (call to iwl_mvm_disable_txq(), so we don't even need
-		 * a memory barrier.
-		 */
-		mvm->queue_to_mac80211[tid_data->txq_id] =
-					IWL_INVALID_MAC80211_QUEUE;
 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
 		break;
 
@@ -1104,7 +1099,7 @@
  * 2) flush the Tx path
  * 3) wait for the transport queues to be empty
  */
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
 	int ret;
 	struct iwl_tx_path_flush_cmd flush_cmd = {
@@ -1112,8 +1107,6 @@
 		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
 	};
 
-	u32 flags = sync ? 0 : CMD_ASYNC;
-
 	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
 				   sizeof(flush_cmd), &flush_cmd);
 	if (ret)
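
With the signature change above, callers of iwl_mvm_flush_tx_path() pass
CMD_ASYNC (or 0 for a synchronous flush) directly instead of a bool, so the
flag no longer needs to be rebuilt inside the function. For example, as in
the call sites touched by this series:

	/* Synchronous flush: blocks until TXPATH_FLUSH completes. */
	iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);

	/* Fire-and-forget flush, e.g. from the time-event code above. */
	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
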
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a7d4342..ad0f169 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -657,45 +658,143 @@
 	if (mvm->support_umac_log)
 		iwl_mvm_dump_umac_error_log(mvm);
 }
-void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
-			const struct iwl_trans_txq_scd_cfg *cfg,
+
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
+{
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+		    !mvm->queue_info[i].setup_reserved)
+			return i;
+
+	return -ENOSPC;
+}
+
+void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
 			unsigned int wdg_timeout)
 {
-	struct iwl_scd_txq_cfg_cmd cmd = {
-		.scd_queue = queue,
-		.enable = 1,
-		.window = cfg->frame_limit,
-		.sta_id = cfg->sta_id,
-		.ssn = cpu_to_le16(ssn),
-		.tx_fifo = cfg->fifo,
-		.aggregate = cfg->aggregate,
-		.tid = cfg->tid,
-	};
+	bool enable_queue = true;
 
-	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
-		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, cfg,
-					 wdg_timeout);
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure this TID isn't already enabled */
+	if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
+			cfg->tid);
 		return;
 	}
 
-	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
-	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
-	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+	/* Update mappings and refcounts */
+	mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount++;
+	if (mvm->queue_info[queue].hw_queue_refcount > 1)
+		enable_queue = false;
+	mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Send the enabling command if we need to */
+	if (enable_queue) {
+		struct iwl_scd_txq_cfg_cmd cmd = {
+			.scd_queue = queue,
+			.enable = 1,
+			.window = cfg->frame_limit,
+			.sta_id = cfg->sta_id,
+			.ssn = cpu_to_le16(ssn),
+			.tx_fifo = cfg->fifo,
+			.aggregate = cfg->aggregate,
+			.tid = cfg->tid,
+		};
+
+		iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
+					 wdg_timeout);
+		WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+					  &cmd),
+		     "Failed to configure queue %d on FIFO %d\n", queue,
+		     cfg->fifo);
+	}
 }
 
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags)
+void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			 u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.enable = 0,
 	};
+	bool remove_mac_queue = true;
 	int ret;
 
-	if (!iwl_mvm_is_scd_cfg_supported(mvm)) {
-		iwl_trans_txq_disable(mvm->trans, queue, true);
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
 		return;
 	}
 
+	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->queue_info[queue].hw_queue_to_mac80211 &=
+			~BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount--;
+
+	cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue,
+			    mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->queue_info[queue].hw_queue_to_mac80211);
+
+	/* If the queue is still enabled - nothing left to do in this func */
+	if (cmd.enable) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return;
+	}
+
+	/* Make sure queue info is correct even though we overwrite it */
+	WARN(mvm->queue_info[queue].hw_queue_refcount ||
+	     mvm->queue_info[queue].tid_bitmap ||
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+	     queue, mvm->queue_info[queue].hw_queue_refcount,
+	     mvm->queue_info[queue].hw_queue_to_mac80211,
+	     mvm->queue_info[queue].tid_bitmap);
+
+	/* If we are here - the queue is freed and we can zero out these vals */
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	iwl_trans_txq_disable(mvm->trans, queue, false);
 	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
 				   sizeof(cmd), &cmd);
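
The new enable/disable helpers effectively turn hardware queues into
refcounted shared objects: SCD_QUEUE_CFG reaches the firmware only on the
0->1 and 1->0 transitions, while intermediate calls just update the TID
bitmap and the mac80211-queue mapping. A condensed model of that
bookkeeping (BIT() as in the kernel; locking and the mac80211 map omitted):

	struct txq_state { int refcount; u16 tid_bitmap; };

	static bool txq_enable(struct txq_state *q, u8 tid)
	{
		if (q->tid_bitmap & BIT(tid))
			return false;		/* TID already mapped */
		q->tid_bitmap |= BIT(tid);
		return q->refcount++ == 0;	/* true: send enable cmd */
	}

	static bool txq_disable(struct txq_state *q, u8 tid)
	{
		q->tid_bitmap &= ~BIT(tid);
		return --q->refcount == 0;	/* true: send disable cmd */
	}
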
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index b0825c4..644b58b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -414,6 +414,11 @@
 	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
 
 /* 8000 Series */
 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 6ba7d30..9028345 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -592,10 +592,8 @@
 
 		do {
 			ret = iwl_pcie_set_hw_ready(trans);
-			if (ret >= 0) {
-				ret = 0;
-				goto out;
-			}
+			if (ret >= 0)
+				return 0;
 
 			usleep_range(200, 1000);
 			t += 200;
@@ -605,10 +603,6 @@
 
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 
-out:
-	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
-		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
-
 	return ret;
 }
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 66c963d..ee46f46 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1819,7 +1819,7 @@
 				       struct ieee80211_vif *vif,
 				       enum ieee80211_ampdu_mlme_action action,
 				       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				       u8 buf_size)
+				       u8 buf_size, bool amsdu)
 {
 	switch (action) {
 	case IEEE80211_AMPDU_TX_START:
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 169384b..f715eee 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -335,7 +335,8 @@
 static int
 mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  enum ieee80211_ampdu_mlme_action action,
-		  struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+		  struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+		  bool amsdu)
 {
 	struct mt7601u_dev *dev = hw->priv;
 	struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index f7c7172..aa498e0 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -173,7 +173,6 @@
 	int pad = 0, aggr_num = 0, ret;
 	struct mwifiex_tx_param tx_param;
 	struct txpd *ptx_pd = NULL;
-	struct timeval tv;
 	int headroom = adapter->iface_type == MWIFIEX_USB ? 0 : INTF_HEADER_LEN;
 
 	skb_src = skb_peek(&pra_list->skb_head);
@@ -202,9 +201,8 @@
 		tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
 	tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT;
 	skb_aggr->priority = skb_src->priority;
 
-	do_gettimeofday(&tv);
-	skb_aggr->tstamp = timeval_to_ktime(tv);
+	skb_aggr->tstamp = ktime_get_real();
 
 	do {
 		/* Check if AMSDU can accommodate this MSDU */
@@ -258,8 +257,7 @@
 	}
 
 	if (adapter->iface_type == MWIFIEX_USB) {
-		adapter->data_sent = true;
-		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
 						   skb_aggr, NULL);
 	} else {
 		if (skb_src)
@@ -299,16 +297,12 @@
 		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
 			    __func__, ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
 		return 0;
 	case -EINPROGRESS:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		break;
 	case 0:
 		mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 317d991..279167d 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -33,12 +33,12 @@
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
+	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8997"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8797/8897/8997 chipset with USB interface.
+	  8766/8797/8997 chipsets with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ff63cb5..4073116 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1821,6 +1821,10 @@
 		return -1;
 	}
 
+	if (netif_carrier_ok(priv->netdev))
+		netif_carrier_off(priv->netdev);
+	mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
+
 	return 0;
 }
 
@@ -1925,6 +1929,10 @@
 	if (mwifiex_set_mgmt_ies(priv, &params->beacon))
 		return -1;
 
+	if (!netif_carrier_ok(priv->netdev))
+		netif_carrier_on(priv->netdev);
+	mwifiex_wake_up_net_dev_queue(priv->netdev, priv->adapter);
+
 	memcpy(&priv->bss_cfg, bss_cfg, sizeof(priv->bss_cfg));
 	kfree(bss_cfg);
 	return 0;
@@ -1994,8 +2002,10 @@
 				  CFG80211_BSS_FTYPE_UNKNOWN,
 				  bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
 				  0, ie_buf, ie_len, 0, GFP_KERNEL);
-	cfg80211_put_bss(priv->wdev.wiphy, bss);
-	memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
+	if (bss) {
+		cfg80211_put_bss(priv->wdev.wiphy, bss);
+		ether_addr_copy(priv->cfg_bssid, bss_info.bssid);
+	}
 
 	return 0;
 }
@@ -2372,7 +2382,7 @@
  * CFG802.11 operation handler for scan request.
  *
  * This function issues a scan request to the firmware based upon
- * the user specified scan configuration. On successfull completion,
+ * the user specified scan configuration. On successful completion,
  * it also informs the results.
  */
 static int
@@ -2859,14 +2869,14 @@
 	case NL80211_IFTYPE_UNSPECIFIED:
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_ADHOC:
-		adapter->curr_iface_comb.sta_intf++;
+		adapter->curr_iface_comb.sta_intf--;
 		break;
 	case NL80211_IFTYPE_AP:
-		adapter->curr_iface_comb.uap_intf++;
+		adapter->curr_iface_comb.uap_intf--;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
 	case NL80211_IFTYPE_P2P_GO:
-		adapter->curr_iface_comb.p2p_intf++;
+		adapter->curr_iface_comb.p2p_intf--;
 		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
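
Here and in scan.c further down, the driver now checks the return value of
cfg80211_inform_bss(): it returns NULL when the BSS entry cannot be
allocated, in which case the bss->priv accesses and the cfg80211_put_bss()
reference drop must be skipped. The safe pattern (variable names
illustrative):

	bss = cfg80211_inform_bss(wiphy, chan, CFG80211_BSS_FTYPE_UNKNOWN,
				  bssid, timestamp, capability, beacon_period,
				  ie, ie_len, signal, GFP_KERNEL);
	if (bss) {
		/* ... use bss->priv ... */
		cfg80211_put_bss(wiphy, bss);	/* drop our reference */
	}
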
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 5a0636d4..9824d8d 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -731,7 +731,7 @@
 		(struct mwifiex_private *) file->private_data;
 	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 	char *buf = (char *) addr;
-	int pos = 0, ret = 0, i;
+	int pos, ret, i;
 	u8 value[MAX_EEPROM_DATA];
 
 	if (!buf)
@@ -739,7 +739,7 @@
 
 	if (saved_offset == -1) {
 		/* No command has been given */
-		pos += snprintf(buf, PAGE_SIZE, "0");
+		pos = snprintf(buf, PAGE_SIZE, "0");
 		goto done;
 	}
 
@@ -748,17 +748,17 @@
 				  (u16) saved_bytes, value);
 	if (ret) {
 		ret = -EINVAL;
-		goto done;
+		goto out_free;
 	}
 
-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
 
 	for (i = 0; i < saved_bytes; i++)
-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
-
-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
 
 done:
+	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+out_free:
 	free_page(addr);
 	return ret;
 }
@@ -856,6 +856,56 @@
 	return ret;
 }
 
+static ssize_t
+mwifiex_timeshare_coex_read(struct file *file, char __user *ubuf,
+			    size_t count, loff_t *ppos)
+{
+	struct mwifiex_private *priv = file->private_data;
+	char buf[3];
+	bool timeshare_coex;
+	int ret;
+	unsigned int len;
+
+	if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+		return -EOPNOTSUPP;
+
+	ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+			       HostCmd_ACT_GEN_GET, 0, &timeshare_coex, true);
+	if (ret)
+		return ret;
+
+	len = sprintf(buf, "%d\n", timeshare_coex);
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static ssize_t
+mwifiex_timeshare_coex_write(struct file *file, const char __user *ubuf,
+			     size_t count, loff_t *ppos)
+{
+	bool timeshare_coex;
+	struct mwifiex_private *priv = file->private_data;
+	char kbuf[16];
+	int ret;
+
+	if (priv->adapter->fw_api_ver != MWIFIEX_FW_V15)
+		return -EOPNOTSUPP;
+
+	memset(kbuf, 0, sizeof(kbuf));
+
+	if (copy_from_user(&kbuf, ubuf, min_t(size_t, sizeof(kbuf) - 1, count)))
+		return -EFAULT;
+
+	if (strtobool(kbuf, &timeshare_coex))
+		return -EINVAL;
+
+	ret = mwifiex_send_cmd(priv, HostCmd_CMD_ROBUST_COEX,
+			       HostCmd_ACT_GEN_SET, 0, &timeshare_coex, true);
+	if (ret)
+		return ret;
+	else
+		return count;
+}
+
 #define MWIFIEX_DFS_ADD_FILE(name) do {                                 \
 	if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir,        \
 			priv, &mwifiex_dfs_##name##_fops))              \
@@ -892,6 +942,7 @@
 MWIFIEX_DFS_FILE_OPS(hscfg);
 MWIFIEX_DFS_FILE_OPS(histogram);
 MWIFIEX_DFS_FILE_OPS(debug_mask);
+MWIFIEX_DFS_FILE_OPS(timeshare_coex);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -918,6 +969,7 @@
 	MWIFIEX_DFS_ADD_FILE(hscfg);
 	MWIFIEX_DFS_ADD_FILE(histogram);
 	MWIFIEX_DFS_ADD_FILE(debug_mask);
+	MWIFIEX_DFS_ADD_FILE(timeshare_coex);
 }
 
 /*
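
The new timeshare_coex entry accepts whatever strtobool() understands
("0"/"1", "y"/"n", ...) and reads back the current mode. A hypothetical
userspace probe -- the path is illustrative, since mwifiex creates one
debugfs directory per interface:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumed path; substitute the actual interface name. */
		const char *path =
			"/sys/kernel/debug/mwifiex/mlan0/timeshare_coex";
		char buf[4] = "";
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return 1;
		write(fd, "1", 1);	/* request timeshare coexistence */
		lseek(fd, 0, SEEK_SET);
		read(fd, buf, sizeof(buf) - 1);
		printf("timeshare_coex = %s", buf);
		close(fd);
		return 0;
	}
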
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 3ec2ac8..1e1e81a 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -101,9 +101,13 @@
 #define FIRMWARE_READY_SDIO				0xfedc
 #define FIRMWARE_READY_PCIE				0xfedcba00
 
+#define MWIFIEX_COEX_MODE_TIMESHARE			0x01
+#define MWIFIEX_COEX_MODE_SPATIAL			0x82
+
 enum mwifiex_usb_ep {
 	MWIFIEX_USB_EP_CMD_EVENT = 1,
 	MWIFIEX_USB_EP_DATA = 2,
+	MWIFIEX_USB_EP_DATA_CH2 = 3,
 };
 
 enum MWIFIEX_802_11_PRIVACY_FILTER {
@@ -162,6 +166,7 @@
 #define TLV_TYPE_CHANRPT_11H_BASIC  (PROPRIETARY_TLV_BASE_ID + 91)
 #define TLV_TYPE_UAP_RETRY_LIMIT    (PROPRIETARY_TLV_BASE_ID + 93)
 #define TLV_TYPE_WAPI_IE            (PROPRIETARY_TLV_BASE_ID + 94)
+#define TLV_TYPE_ROBUST_COEX        (PROPRIETARY_TLV_BASE_ID + 96)
 #define TLV_TYPE_UAP_MGMT_FRAME     (PROPRIETARY_TLV_BASE_ID + 104)
 #define TLV_TYPE_MGMT_IE            (PROPRIETARY_TLV_BASE_ID + 105)
 #define TLV_TYPE_AUTO_DS_PARAM      (PROPRIETARY_TLV_BASE_ID + 113)
@@ -173,6 +178,7 @@
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
 #define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
+#define TLV_TYPE_MC_GROUP_INFO      (PROPRIETARY_TLV_BASE_ID + 184)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
@@ -352,6 +358,7 @@
 #define HostCmd_CMD_AMSDU_AGGR_CTRL                   0x00df
 #define HostCmd_CMD_TXPWR_CFG                         0x00d1
 #define HostCmd_CMD_TX_RATE_CFG                       0x00d6
+#define HostCmd_CMD_ROBUST_COEX                       0x00e0
 #define HostCmd_CMD_802_11_PS_MODE_ENH                0x00e4
 #define HostCmd_CMD_802_11_HS_CFG_ENH                 0x00e5
 #define HostCmd_CMD_P2P_MODE_CFG                      0x00eb
@@ -1875,6 +1882,11 @@
 	u8 reserved;
 } __packed;
 
+struct mwifiex_ie_types_robust_coex {
+	struct mwifiex_ie_types_header header;
+	__le32 mode;
+} __packed;
+
 struct host_cmd_ds_version_ext {
 	u8 version_str_sel;
 	char version_str[128];
@@ -1984,6 +1996,22 @@
 	u8 tlv_buffer[0];
 } __packed;
 
+struct mwifiex_ie_types_mc_group_info {
+	struct mwifiex_ie_types_header header;
+	u8 chan_group_id;
+	u8 chan_buf_weight;
+	u8 band_config;
+	u8 chan_num;
+	u32 chan_time;
+	u32 reserved;
+	union {
+		u8 sdio_func_num;
+		u8 usb_ep_num;
+	} hid_num;
+	u8 intf_num;
+	u8 bss_type_numlist[0];
+} __packed;
+
 struct meas_rpt_map {
 	u8 rssi:3;
 	u8 unmeasured:1;
@@ -2060,6 +2088,11 @@
 	__le16 policy;
 } __packed;
 
+struct host_cmd_ds_robust_coex {
+	__le16 action;
+	__le16 reserved;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2129,6 +2162,7 @@
 		struct host_cmd_ds_chan_rpt_req chan_rpt_req;
 		struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
 		struct host_cmd_ds_multi_chan_policy mc_policy;
+		struct host_cmd_ds_robust_coex coex;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 5d3ae63..de74a77 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -78,6 +78,7 @@
 	priv->media_connected = false;
 	eth_broadcast_addr(priv->curr_addr);
 	priv->port_open = false;
+	priv->usb_port = MWIFIEX_USB_EP_DATA;
 	priv->pkt_tx_ctrl = 0;
 	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 	priv->data_rate = 0;	/* Initially indicate the rate as auto */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 278dc94..969ca1e 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -294,9 +294,15 @@
 			/* We have tried to wakeup the card already */
 			if (adapter->pm_wakeup_fw_try)
 				break;
-			if (adapter->ps_state != PS_STATE_AWAKE ||
-			    adapter->tx_lock_flag)
+			if (adapter->ps_state != PS_STATE_AWAKE)
 				break;
+			if (adapter->tx_lock_flag) {
+				if (adapter->iface_type == MWIFIEX_USB) {
+					if (!adapter->usb_mc_setup)
+						break;
+				} else
+					break;
+			}
 
 			if ((!adapter->scan_chan_gap_enabled &&
 			     adapter->scan_processing) || adapter->data_sent ||
@@ -345,11 +351,18 @@
 		 */
 		if ((adapter->ps_state == PS_STATE_SLEEP) ||
 		    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
-		    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
-		    adapter->tx_lock_flag){
+		    (adapter->ps_state == PS_STATE_SLEEP_CFM)) {
 			continue;
 		}
 
+		if (adapter->tx_lock_flag) {
+			if (adapter->iface_type == MWIFIEX_USB) {
+				if (!adapter->usb_mc_setup)
+					continue;
+			} else
+				continue;
+		}
+
 		if (!adapter->cmd_sent && !adapter->curr_cmd &&
 		    mwifiex_is_send_cmd_allowed
 		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
@@ -359,6 +372,13 @@
 			}
 		}
 
+		/* If USB multi-channel setup is ongoing, wait until it
+		 * completes before transmitting data.
+		 */
+		if (adapter->iface_type == MWIFIEX_USB &&
+		    adapter->usb_mc_setup)
+			continue;
+
 		if ((adapter->scan_chan_gap_enabled ||
 		     !adapter->scan_processing) &&
 		    !adapter->data_sent &&
@@ -928,6 +948,32 @@
 	}
 }
 
+void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = adapter->card;
+	struct mwifiex_private *priv;
+	u16 tx_buf_size;
+	int i, ret;
+
+	card->mc_resync_flag = true;
+	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+		if (atomic_read(&card->port[i].tx_data_urb_pending)) {
+			mwifiex_dbg(adapter, WARN, "pending data urb in sys\n");
+			return;
+		}
+	}
+
+	card->mc_resync_flag = false;
+	tx_buf_size = 0xffff;
+	priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+	ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
+			       HostCmd_ACT_GEN_SET, 0, &tx_buf_size, false);
+	if (ret)
+		mwifiex_dbg(adapter, ERROR,
+			    "send reconfig tx buf size cmd err\n");
+}
+EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
+
 void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
 {
 	void *p;
@@ -963,8 +1009,10 @@
 		cardp = (struct usb_card_rec *)adapter->card;
 		p += sprintf(p, "tx_cmd_urb_pending = %d\n",
 			     atomic_read(&cardp->tx_cmd_urb_pending));
-		p += sprintf(p, "tx_data_urb_pending = %d\n",
-			     atomic_read(&cardp->tx_data_urb_pending));
+		p += sprintf(p, "tx_data_urb_pending_port_0 = %d\n",
+			     atomic_read(&cardp->port[0].tx_data_urb_pending));
+		p += sprintf(p, "tx_data_urb_pending_port_1 = %d\n",
+			     atomic_read(&cardp->port[1].tx_data_urb_pending));
 		p += sprintf(p, "rx_cmd_urb_pending = %d\n",
 			     atomic_read(&cardp->rx_cmd_urb_pending));
 		p += sprintf(p, "rx_data_urb_pending = %d\n",
@@ -1151,6 +1199,7 @@
 	.ndo_stop = mwifiex_close,
 	.ndo_start_xmit = mwifiex_hard_start_xmit,
 	.ndo_set_mac_address = mwifiex_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_tx_timeout = mwifiex_tx_timeout,
 	.ndo_get_stats = mwifiex_get_stats,
 	.ndo_set_rx_mode = mwifiex_set_multicast_list,
@@ -1447,6 +1496,26 @@
 }
 EXPORT_SYMBOL_GPL(mwifiex_remove_card);
 
+void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
+		  const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	if (!adapter->dev || !(adapter->debug_mask & mask))
+		return;
+
+	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	dev_info(adapter->dev, "%pV", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(_mwifiex_dbg);
+
 /*
  * This function initializes the module.
  *
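
The mwifiex_dbg() rework replaces the macro body with a real function that
funnels its varargs through the kernel's %pV/va_format mechanism; together
with the __printf(3, 4) annotation in main.h, format strings are now
type-checked at every call site. Call sites themselves are unchanged, e.g.
this one from 11n_aggr.c above:

	/* Unchanged call site; the arguments are now checked by the
	 * compiler against the format string.
	 */
	mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
		    __func__, ret);
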
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 6b95121..3959f1c 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -48,6 +48,9 @@
 
 extern const char driver_version[];
 
+struct mwifiex_adapter;
+struct mwifiex_private;
+
 enum {
 	MWIFIEX_ASYNC_CMD,
 	MWIFIEX_SYNC_CMD
@@ -180,12 +183,11 @@
 					MWIFIEX_DBG_FATAL | \
 					MWIFIEX_DBG_ERROR)
 
-#define mwifiex_dbg(adapter, dbg_mask, fmt, args...)		\
-do {								\
-	if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)	\
-		if ((adapter)->dev)				\
-			dev_info((adapter)->dev, fmt, ## args);	\
-} while (0)
+__printf(3, 4)
+void _mwifiex_dbg(const struct mwifiex_adapter *adapter, int mask,
+		  const char *fmt, ...);
+#define mwifiex_dbg(adapter, mask, fmt, ...)				\
+	_mwifiex_dbg(adapter, MWIFIEX_DBG_##mask, fmt, ##__VA_ARGS__)
 
 #define DEBUG_DUMP_DATA_MAX_LEN		128
 #define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len)	\
@@ -506,9 +508,6 @@
 	MWIFIEX_IFACE_WORK_CARD_RESET,
 };
 
-struct mwifiex_adapter;
-struct mwifiex_private;
-
 struct mwifiex_private {
 	struct mwifiex_adapter *adapter;
 	u8 bss_type;
@@ -520,6 +519,7 @@
 	u8 curr_addr[ETH_ALEN];
 	u8 media_connected;
 	u8 port_open;
+	u8 usb_port;
 	u32 num_tx_timeout;
 	/* track consecutive timeout */
 	u8 tx_timeout_cnt;
@@ -816,6 +816,8 @@
 	void (*iface_work)(struct work_struct *work);
 	void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
 	void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
+	void (*multi_port_resync)(struct mwifiex_adapter *);
+	bool (*is_port_ready)(struct mwifiex_private *);
 };
 
 struct mwifiex_adapter {
@@ -861,6 +863,8 @@
 	u8 more_task_flag;
 	u16 tx_buf_size;
 	u16 curr_tx_buf_size;
+	/* sdio single port rx aggregation capability */
+	bool host_disable_sdio_rx_aggr;
 	bool sdio_rx_aggr_enable;
 	u16 sdio_rx_block_size;
 	u32 ioport;
@@ -988,6 +992,8 @@
 	u8 coex_rx_win_size;
 	bool drcs_enabled;
 	u8 active_scan_triggered;
+	bool usb_mc_status;
+	bool usb_mc_setup;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1561,6 +1567,7 @@
 				    struct sk_buff *event);
 void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
 				      struct sk_buff *event_skb);
+void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter);
 
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 408b684..21192b6 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1815,7 +1815,6 @@
 	if (!card->evt_buf_list[rdptr]) {
 		skb_push(skb, INTF_HEADER_LEN);
 		skb_put(skb, MAX_EVENT_SIZE - skb->len);
-		memset(skb->data, 0, MAX_EVENT_SIZE);
 		if (mwifiex_map_pci_memory(adapter, skb,
 					   MAX_EVENT_SIZE,
 					   PCI_DMA_FROMDEVICE))
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 5847863..c20017c 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1839,14 +1839,18 @@
 					    bssid, timestamp,
 					    cap_info_bitmap, beacon_period,
 					    ie_buf, ie_len, rssi, GFP_KERNEL);
-			bss_priv = (struct mwifiex_bss_priv *)bss->priv;
-			bss_priv->band = band;
-			bss_priv->fw_tsf = fw_tsf;
-			if (priv->media_connected &&
-			    !memcmp(bssid, priv->curr_bss_params.bss_descriptor
-				    .mac_address, ETH_ALEN))
-				mwifiex_update_curr_bss_params(priv, bss);
-			cfg80211_put_bss(priv->wdev.wiphy, bss);
+			if (bss) {
+				bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+				bss_priv->band = band;
+				bss_priv->fw_tsf = fw_tsf;
+				if (priv->media_connected &&
+				    !memcmp(bssid, priv->curr_bss_params.
+					    bss_descriptor.mac_address,
+					    ETH_ALEN))
+					mwifiex_update_curr_bss_params(priv,
+								       bss);
+				cfg80211_put_bss(priv->wdev.wiphy, bss);
+			}
 
 			if ((chan->flags & IEEE80211_CHAN_RADAR) ||
 			    (chan->flags & IEEE80211_CHAN_NO_IR)) {
@@ -1889,7 +1893,7 @@
 	u8 id = 0;
 	struct mwifiex_user_scan_cfg  *user_scan_cfg;
 
-	if (adapter->active_scan_triggered) {
+	if (adapter->active_scan_triggered || !priv->scan_request) {
 		adapter->active_scan_triggered = false;
 		return 0;
 	}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5d05c6f..78a8474 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -1606,8 +1606,9 @@
 				(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
 				 1) / MWIFIEX_SDIO_BLOCK_SIZE;
 			if (rx_len <= INTF_HEADER_LEN ||
-			    (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
-			     card->mpa_rx.buf_size) {
+			    (card->mpa_rx.enabled &&
+			     ((rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
+			      card->mpa_rx.buf_size))) {
 				mwifiex_dbg(adapter, ERROR,
 					    "invalid rx_len=%d\n",
 					    rx_len);
@@ -1925,6 +1926,8 @@
 	if (ret) {
 		kfree(card->mpa_tx.buf);
 		kfree(card->mpa_rx.buf);
+		card->mpa_tx.buf_size = 0;
+		card->mpa_rx.buf_size = 0;
 	}
 
 	return ret;
@@ -2055,16 +2058,26 @@
 	ret = mwifiex_alloc_sdio_mpa_buffers(adapter,
 					     card->mp_tx_agg_buf_size,
 					     card->mp_rx_agg_buf_size);
-	if (ret) {
-		mwifiex_dbg(adapter, ERROR,
-			    "failed to alloc sdio mp-a buffers\n");
-		kfree(card->mp_regs);
-		return -1;
+
+	/* Allocate 32k MPA Tx/Rx buffers if 64k memory allocation fails */
+	if (ret && (card->mp_tx_agg_buf_size == MWIFIEX_MP_AGGR_BUF_SIZE_MAX ||
+		    card->mp_rx_agg_buf_size == MWIFIEX_MP_AGGR_BUF_SIZE_MAX)) {
+		/* Disable rx single port aggregation */
+		adapter->host_disable_sdio_rx_aggr = true;
+
+		ret = mwifiex_alloc_sdio_mpa_buffers
+			(adapter, MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+			 MWIFIEX_MP_AGGR_BUF_SIZE_32K);
+		if (ret) {
+			/* Disable multi port aggregation */
+			card->mpa_tx.enabled = 0;
+			card->mpa_rx.enabled = 0;
+		}
 	}
 
 	adapter->auto_tdls = card->can_auto_tdls;
 	adapter->ext_scan = card->can_ext_scan;
-	return ret;
+	return 0;
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index a49a80d..e486867 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -1531,6 +1531,33 @@
 	return 0;
 }
 
+static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
+				   struct host_cmd_ds_command *cmd,
+				   u16 cmd_action, bool *is_timeshare)
+{
+	struct host_cmd_ds_robust_coex *coex = &cmd->params.coex;
+	struct mwifiex_ie_types_robust_coex *coex_tlv;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_ROBUST_COEX);
+	cmd->size = cpu_to_le16(sizeof(*coex) + sizeof(*coex_tlv) + S_DS_GEN);
+
+	coex->action = cpu_to_le16(cmd_action);
+	coex_tlv = (struct mwifiex_ie_types_robust_coex *)
+				((u8 *)coex + sizeof(*coex));
+	coex_tlv->header.type = cpu_to_le16(TLV_TYPE_ROBUST_COEX);
+	coex_tlv->header.len = cpu_to_le16(sizeof(coex_tlv->mode));
+
+	if (coex->action == HostCmd_ACT_GEN_GET)
+		return 0;
+
+	if (*is_timeshare)
+		coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_TIMESHARE);
+	else
+		coex_tlv->mode = cpu_to_le32(MWIFIEX_COEX_MODE_SPATIAL);
+
+	return 0;
+}
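+
+/*
+ * For reference, a sketch of the wire layout the helper above emits.
+ * The struct definitions live in fw.h and are not part of this hunk,
+ * so the field names below are assumptions; all multi-byte fields are
+ * little-endian:
+ *
+ *	struct host_cmd_ds_robust_coex {
+ *		__le16 action;		(HostCmd_ACT_GEN_GET or _SET)
+ *		__le16 reserved;
+ *	} __packed;
+ *
+ *	struct mwifiex_ie_types_robust_coex {
+ *		struct mwifiex_ie_types_header header;	(type, len)
+ *		__le32 mode;		(MWIFIEX_COEX_MODE_TIMESHARE or
+ *					 MWIFIEX_COEX_MODE_SPATIAL)
+ *	} __packed;
+ */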
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
@@ -2040,6 +2067,10 @@
 		ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
 						data_buf);
 		break;
+	case HostCmd_CMD_ROBUST_COEX:
+		ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
+					      data_buf);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2125,7 +2156,8 @@
 
 		/** Set SDIO Single Port RX Aggr Info */
 		if (priv->adapter->iface_type == MWIFIEX_SDIO &&
-		    ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) {
+		    ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info) &&
+		    !priv->adapter->host_disable_sdio_rx_aggr) {
 			sdio_sp_rx_aggr_enable = true;
 			ret = mwifiex_send_cmd(priv,
 					       HostCmd_CMD_SDIO_SP_RX_AGGR_CFG,
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 87b69d8..9ac7aa2 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -1007,6 +1007,28 @@
 	return 0;
 }
 
+static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
+				   struct host_cmd_ds_command *resp,
+				   bool *is_timeshare)
+{
+	struct host_cmd_ds_robust_coex *coex = &resp->params.coex;
+	struct mwifiex_ie_types_robust_coex *coex_tlv;
+	u16 action = le16_to_cpu(coex->action);
+	u32 mode;
+
+	coex_tlv = (struct mwifiex_ie_types_robust_coex
+		    *)((u8 *)coex + sizeof(struct host_cmd_ds_robust_coex));
+	if (action == HostCmd_ACT_GEN_GET) {
+		mode = le32_to_cpu(coex_tlv->mode);
+		if (mode == MWIFIEX_COEX_MODE_TIMESHARE)
+			*is_timeshare = true;
+		else
+			*is_timeshare = false;
+	}
+
+	return 0;
+}
+
 /*
  * This function handles the command responses.
  *
@@ -1128,6 +1150,17 @@
 		ret = mwifiex_ret_11n_addba_resp(priv, resp);
 		break;
 	case HostCmd_CMD_RECONFIGURE_TX_BUFF:
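+		/*
+		 * A buff_size of 0xffff acts as a sentinel: skip the normal
+		 * tx_buf_size update and, if a USB multi-channel setup is in
+		 * flight, resync the data ports instead.
+		 */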
+		if (0xffff == (u16)le16_to_cpu(resp->params.tx_buf.buff_size)) {
+			if (adapter->iface_type == MWIFIEX_USB &&
+			    adapter->usb_mc_setup) {
+				if (adapter->if_ops.multi_port_resync)
+					adapter->if_ops.
+						multi_port_resync(adapter);
+				adapter->usb_mc_setup = false;
+				adapter->tx_lock_flag = false;
+			}
+			break;
+		}
 		adapter->tx_buf_size = (u16) le16_to_cpu(resp->params.
 							     tx_buf.buff_size);
 		adapter->tx_buf_size = (adapter->tx_buf_size
@@ -1202,6 +1235,9 @@
 		break;
 	case HostCmd_CMD_TDLS_CONFIG:
 		break;
+	case HostCmd_CMD_ROBUST_COEX:
+		ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
+		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
 			    "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 3d18c58..ff3ee9d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -313,24 +313,78 @@
 				      struct sk_buff *event_skb)
 {
 	struct mwifiex_ie_types_multi_chan_info *chan_info;
-	u16 status;
+	struct mwifiex_ie_types_mc_group_info *grp_info;
+	struct mwifiex_adapter *adapter = priv->adapter;
+	struct mwifiex_ie_types_header *tlv;
+	u16 tlv_buf_left, tlv_type, tlv_len;
+	int intf_num, bss_type, bss_num, i;
+	struct mwifiex_private *intf_priv;
 
+	tlv_buf_left = event_skb->len - sizeof(u32);
 	chan_info = (void *)event_skb->data + sizeof(u32);
 
-	if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
-		mwifiex_dbg(priv->adapter, ERROR,
+	if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO ||
+	    tlv_buf_left < sizeof(struct mwifiex_ie_types_multi_chan_info)) {
+		mwifiex_dbg(adapter, ERROR,
 			    "unknown TLV in chan_info event\n");
 		return;
 	}
 
-	status = le16_to_cpu(chan_info->status);
+	adapter->usb_mc_status = le16_to_cpu(chan_info->status);
+	mwifiex_dbg(adapter, EVENT, "multi chan operation %s\n",
+		    adapter->usb_mc_status ? "started" : "over");
 
-	if (status) {
-		mwifiex_dbg(priv->adapter, EVENT,
-			    "multi-channel operation started\n");
-	} else {
-		mwifiex_dbg(priv->adapter, EVENT,
-			    "multi-channel operation over\n");
+	tlv_buf_left -= sizeof(struct mwifiex_ie_types_multi_chan_info);
+	tlv = (struct mwifiex_ie_types_header *)chan_info->tlv_buffer;
+
+	while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+		tlv_type = le16_to_cpu(tlv->type);
+		tlv_len  = le16_to_cpu(tlv->len);
+		if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+		    tlv_buf_left) {
+			mwifiex_dbg(adapter, ERROR, "wrong tlv: tlvLen=%d,\t"
+				    "tlvBufLeft=%d\n", tlv_len, tlv_buf_left);
+			break;
+		}
+		if (tlv_type != TLV_TYPE_MC_GROUP_INFO) {
+			mwifiex_dbg(adapter, ERROR, "wrong tlv type: 0x%x\n",
+				    tlv_type);
+			break;
+		}
+
+		grp_info = (struct mwifiex_ie_types_mc_group_info *)tlv;
+		intf_num = grp_info->intf_num;
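+		/* each bss_type_numlist[] byte packs the interface's
+		 * bss_type in the high nibble, bss_num in the low one */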
+		for (i = 0; i < intf_num; i++) {
+			bss_type = grp_info->bss_type_numlist[i] >> 4;
+			bss_num = grp_info->bss_type_numlist[i] & BSS_NUM_MASK;
+			intf_priv = mwifiex_get_priv_by_id(adapter, bss_num,
+							   bss_type);
+			if (!intf_priv) {
+				mwifiex_dbg(adapter, ERROR,
+					    "Invalid bss_type bss_num\t"
+					    "in multi channel event\n");
+				continue;
+			}
+			if (adapter->iface_type == MWIFIEX_USB) {
+				u8 ep;
+
+				ep = grp_info->hid_num.usb_ep_num;
+				if (ep == MWIFIEX_USB_EP_DATA ||
+				    ep == MWIFIEX_USB_EP_DATA_CH2)
+					intf_priv->usb_port = ep;
+			}
+		}
+
+		tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+				tlv_len;
+		tlv = (void *)((u8 *)tlv + tlv_len +
+			       sizeof(struct mwifiex_ie_types_header));
+	}
+
+	if (adapter->iface_type == MWIFIEX_USB) {
+		adapter->tx_lock_flag = true;
+		adapter->usb_mc_setup = true;
+		mwifiex_multi_chan_resync(adapter);
 	}
 }
 
@@ -562,7 +616,9 @@
 		adapter->tx_lock_flag = false;
 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
 			if (mwifiex_check_last_packet_indication(priv)) {
-				if (adapter->data_sent) {
+				if (adapter->data_sent ||
+				    (adapter->if_ops.is_port_ready &&
+				     !adapter->if_ops.is_port_ready(priv))) {
 					adapter->ps_state = PS_STATE_AWAKE;
 					adapter->pm_wakeup_card_req = false;
 					adapter->pm_wakeup_fw_try = false;
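
The multi-channel event parser added to sta_event.c above uses the usual
bounds-checked TLV walk. Stripped of the driver specifics, the pattern it
follows is (a sketch; tlv, tlv_len and tlv_buf_left are the variables
from the hunk above):

	while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
		tlv_len = le16_to_cpu(tlv->len);

		/* a TLV whose declared length overruns the buffer ends the walk */
		if (sizeof(struct mwifiex_ie_types_header) + tlv_len >
		    tlv_buf_left)
			break;

		/* ...dispatch on le16_to_cpu(tlv->type)... */

		tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) + tlv_len;
		tlv = (void *)((u8 *)tlv + tlv_len +
			       sizeof(struct mwifiex_ie_types_header));
	}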
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 355ac59..f6683ea 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -153,6 +153,10 @@
 	if (adapter->data_sent)
 		return -1;
 
+	if (adapter->if_ops.is_port_ready &&
+	    !adapter->if_ops.is_port_ready(priv))
+		return -1;
+
 	skb = dev_alloc_skb(data_len);
 	if (!skb)
 		return -1;
@@ -174,7 +178,7 @@
 	local_tx_pd->bss_type = priv->bss_type;
 
 	if (adapter->iface_type == MWIFIEX_USB) {
-		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
 						   skb, NULL);
 	} else {
 		skb_push(skb, INTF_HEADER_LEN);
@@ -191,7 +195,6 @@
 		adapter->dbg.num_tx_host_to_card_failure++;
 		break;
 	case -1:
-		adapter->data_sent = false;
 		dev_kfree_skb_any(skb);
 		mwifiex_dbg(adapter, ERROR,
 			    "%s: host_to_card failed: ret=%d\n",
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index b3e163d..9275f9c 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -204,6 +204,12 @@
 		return -1;
 	}
 
+	if (!(le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info))) {
+		mwifiex_dbg(priv->adapter, WARN,
+			    "TDLS peer doesn't support ht capabilities\n");
+		return 0;
+	}
+
 	pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_operation) + 2);
 	*pos++ = WLAN_EID_HT_OPERATION;
 	*pos++ = sizeof(struct ieee80211_ht_operation);
@@ -252,6 +258,12 @@
 		return -1;
 	}
 
+	if (!(le32_to_cpu(sta_ptr->tdls_cap.vhtcap.vht_cap_info))) {
+		mwifiex_dbg(adapter, WARN,
+			    "TDLS peer doesn't support vht capabilities\n");
+		return 0;
+	}
+
 	if (!mwifiex_is_bss_in_11ac_mode(priv)) {
 		if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
 		   WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 8b1e5b5..bf6182b 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -115,9 +115,8 @@
 		if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
 			local_tx_pd = (struct txpd *)(head_ptr + hroom);
 		if (adapter->iface_type == MWIFIEX_USB) {
-			adapter->data_sent = true;
 			ret = adapter->if_ops.host_to_card(adapter,
-							   MWIFIEX_USB_EP_DATA,
+							   priv->usb_port,
 							   skb, NULL);
 		} else {
 			ret = adapter->if_ops.host_to_card(adapter,
@@ -130,7 +129,7 @@
 
 	switch (ret) {
 	case -ENOSR:
-		mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
+		mwifiex_dbg(adapter, DATA, "data: -ENOSR is returned\n");
 		break;
 	case -EBUSY:
 		if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -142,8 +141,6 @@
 		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		mwifiex_dbg(adapter, ERROR,
 			    "mwifiex_write_data_async failed: 0x%X\n",
 			    ret);
@@ -151,8 +148,6 @@
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	case -EINPROGRESS:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		break;
 	case 0:
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
@@ -193,9 +188,8 @@
 	}
 
 	if (adapter->iface_type == MWIFIEX_USB) {
-		adapter->data_sent = true;
 		ret = adapter->if_ops.host_to_card(adapter,
-						   MWIFIEX_USB_EP_DATA,
+						   priv->usb_port,
 						   skb, NULL);
 	} else {
 		ret = adapter->if_ops.host_to_card(adapter,
@@ -222,16 +216,12 @@
 		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
 		break;
 	case -1:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		mwifiex_dbg(adapter, ERROR,
 			    "mwifiex_write_data_async failed: 0x%X\n", ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	case -EINPROGRESS:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		break;
 	case 0:
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
@@ -306,9 +296,6 @@
 	if (!priv)
 		goto done;
 
-	if (adapter->iface_type == MWIFIEX_USB)
-		adapter->data_sent = false;
-
 	mwifiex_set_trans_start(priv->netdev);
 	if (!status) {
 		priv->stats.tx_packets++;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 4d5a6e3..759a6ad 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -846,22 +846,6 @@
 {
 	enum state_11d_t state_11d;
 
-	if (mwifiex_del_mgmt_ies(priv))
-		mwifiex_dbg(priv->adapter, ERROR,
-			    "Failed to delete mgmt IEs!\n");
-
-	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
-			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		mwifiex_dbg(priv->adapter, ERROR, "Failed to stop the BSS\n");
-		return -1;
-	}
-
-	if (mwifiex_send_cmd(priv, HOST_CMD_APCMD_SYS_RESET,
-			     HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-		mwifiex_dbg(priv->adapter, ERROR, "Failed to reset BSS\n");
-		return -1;
-	}
-
 	if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
 			     HostCmd_ACT_GEN_SET,
 			     UAP_BSS_PARAMS_I, bss_cfg, false)) {
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 46c972a..86ff542 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -179,19 +179,12 @@
 	case EVENT_UAP_BSS_IDLE:
 		priv->media_connected = false;
 		priv->port_open = false;
-		if (netif_carrier_ok(priv->netdev))
-			netif_carrier_off(priv->netdev);
-		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
-
 		mwifiex_clean_txrx(priv);
 		mwifiex_del_all_sta_list(priv);
 		break;
 	case EVENT_UAP_BSS_ACTIVE:
 		priv->media_connected = true;
 		priv->port_open = true;
-		if (!netif_carrier_ok(priv->netdev))
-			netif_carrier_on(priv->netdev);
-		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
 		break;
 	case EVENT_UAP_BSS_START:
 		mwifiex_dbg(adapter, EVENT,
@@ -269,7 +262,9 @@
 		adapter->tx_lock_flag = false;
 		if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
 			if (mwifiex_check_last_packet_indication(priv)) {
-				if (adapter->data_sent) {
+				if (adapter->data_sent ||
+				    (adapter->if_ops.is_port_ready &&
+				     !adapter->if_ops.is_port_ready(priv))) {
 					adapter->ps_state = PS_STATE_AWAKE;
 					adapter->pm_wakeup_card_req = false;
 					adapter->pm_wakeup_fw_try = false;
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 8766741..74d5d72 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -31,7 +31,8 @@
  */
 static bool
 mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
-				  struct list_head *ra_list_head)
+				  struct list_head *ra_list_head,
+				  int tid)
 {
 	struct mwifiex_ra_list_tbl *ra_list;
 	struct sk_buff *skb, *tmp;
@@ -49,7 +50,10 @@
 				__skb_unlink(skb, &ra_list->skb_head);
 				mwifiex_write_data_complete(adapter, skb, 0,
 							    -1);
-				atomic_dec(&priv->wmm.tx_pkts_queued);
+				if (ra_list->tx_paused)
+					priv->wmm.pkts_paused[tid]--;
+				else
+					atomic_dec(&priv->wmm.tx_pkts_queued);
 				pkt_deleted = true;
 			}
 			if ((atomic_read(&adapter->pending_bridged_pkts) <=
@@ -77,7 +81,7 @@
 		if (priv->del_list_idx == MAX_NUM_TID)
 			priv->del_list_idx = 0;
 		ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
-		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) {
+		if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list, i)) {
 			priv->del_list_idx++;
 			break;
 		}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 5e789b2..e43aff9 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -42,11 +42,6 @@
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8801_PID_2,
 				       USB_CLASS_VENDOR_SPEC,
 				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
-	/* 8897 */
-	{USB_DEVICE(USB8XXX_VID, USB8897_PID_1)},
-	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
-				       USB_CLASS_VENDOR_SPEC,
-				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
 	/* 8997 */
 	{USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
@@ -264,6 +259,8 @@
 	struct urb_context *context = (struct urb_context *)(urb->context);
 	struct mwifiex_adapter *adapter = context->adapter;
 	struct usb_card_rec *card = adapter->card;
+	struct usb_tx_data_port *port;
+	int i;
 
 	mwifiex_dbg(adapter, INFO,
 		    "%s: status: %d\n", __func__, urb->status);
@@ -276,11 +273,22 @@
 	} else {
 		mwifiex_dbg(adapter, DATA,
 			    "%s: DATA\n", __func__);
-		atomic_dec(&card->tx_data_urb_pending);
+		for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+			port = &card->port[i];
+			if (context->ep == port->tx_data_ep) {
+				atomic_dec(&port->tx_data_urb_pending);
+				port->block_status = false;
+				break;
+			}
+		}
+		adapter->data_sent = false;
 		mwifiex_write_data_complete(adapter, context->skb, 0,
 					    urb->status ? -1 : 0);
 	}
 
+	if (card->mc_resync_flag)
+		mwifiex_multi_chan_resync(adapter);
+
 	mwifiex_queue_main_work(adapter);
 
 	return;
@@ -327,7 +335,8 @@
 
 static void mwifiex_usb_free(struct usb_card_rec *card)
 {
-	int i;
+	struct usb_tx_data_port *port;
+	int i, j;
 
 	if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
 		usb_kill_urb(card->rx_cmd.urb);
@@ -345,9 +354,12 @@
 		card->rx_data_list[i].urb = NULL;
 	}
 
-	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
-		usb_free_urb(card->tx_data_list[i].urb);
-		card->tx_data_list[i].urb = NULL;
+	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+		port = &card->port[i];
+		for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
+			usb_free_urb(port->tx_data_list[j].urb);
+			port->tx_data_list[j].urb = NULL;
+		}
 	}
 
 	usb_free_urb(card->tx_cmd.urb);
@@ -386,14 +398,12 @@
 	case USB8766_PID_1:
 	case USB8797_PID_1:
 	case USB8801_PID_1:
-	case USB8897_PID_1:
 	case USB8997_PID_1:
 		card->usb_boot_state = USB8XXX_FW_DNLD;
 		break;
 	case USB8766_PID_2:
 	case USB8797_PID_2:
 	case USB8801_PID_2:
-	case USB8897_PID_2:
 	case USB8997_PID_2:
 		card->usb_boot_state = USB8XXX_FW_READY;
 		break;
@@ -437,8 +447,18 @@
 			pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
 				 le16_to_cpu(epd->wMaxPacketSize),
 				 epd->bEndpointAddress);
-			card->tx_data_ep = usb_endpoint_num(epd);
-			atomic_set(&card->tx_data_urb_pending, 0);
+			card->port[0].tx_data_ep = usb_endpoint_num(epd);
+			atomic_set(&card->port[0].tx_data_urb_pending, 0);
+		}
+		if (usb_endpoint_dir_out(epd) &&
+		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA_CH2 &&
+		    usb_endpoint_xfer_bulk(epd)) {
+			pr_debug("info: bulk OUT chan2:\t"
+				 "max pkt size: %d, addr: %d\n",
+				 le16_to_cpu(epd->wMaxPacketSize),
+				 epd->bEndpointAddress);
+			card->port[1].tx_data_ep = usb_endpoint_num(epd);
+			atomic_set(&card->port[1].tx_data_urb_pending, 0);
 		}
 		if (usb_endpoint_dir_out(epd) &&
 		    usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
@@ -480,7 +500,8 @@
 {
 	struct usb_card_rec *card = usb_get_intfdata(intf);
 	struct mwifiex_adapter *adapter;
-	int i;
+	struct usb_tx_data_port *port;
+	int i, j;
 
 	if (!card || !card->adapter) {
 		pr_err("%s: card or card->adapter is NULL\n", __func__);
@@ -511,9 +532,13 @@
 			if (card->rx_data_list[i].urb)
 				usb_kill_urb(card->rx_data_list[i].urb);
 
-	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++)
-		if (card->tx_data_list[i].urb)
-			usb_kill_urb(card->tx_data_list[i].urb);
+	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+		port = &card->port[i];
+		for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
+			if (port->tx_data_list[j].urb)
+				usb_kill_urb(port->tx_data_list[j].urb);
+		}
+	}
 
 	if (card->tx_cmd.urb)
 		usb_kill_urb(card->tx_cmd.urb);
@@ -625,7 +650,8 @@
 static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
 {
 	struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
-	int i;
+	struct usb_tx_data_port *port;
+	int i, j;
 
 	card->tx_cmd.adapter = adapter;
 	card->tx_cmd.ep = card->tx_cmd_ep;
@@ -637,17 +663,25 @@
 		return -ENOMEM;
 	}
 
-	card->tx_data_ix = 0;
-
-	for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
-		card->tx_data_list[i].adapter = adapter;
-		card->tx_data_list[i].ep = card->tx_data_ep;
-
-		card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!card->tx_data_list[i].urb) {
-			mwifiex_dbg(adapter, ERROR,
-				    "tx_data_list[] urb allocation failed\n");
-			return -ENOMEM;
+	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+		port = &card->port[i];
+		if (!port->tx_data_ep)
+			continue;
+		port->tx_data_ix = 0;
+		if (port->tx_data_ep == MWIFIEX_USB_EP_DATA)
+			port->block_status = false;
+		else
+			port->block_status = true;
+		for (j = 0; j < MWIFIEX_TX_DATA_URB; j++) {
+			port->tx_data_list[j].adapter = adapter;
+			port->tx_data_list[j].ep = port->tx_data_ep;
+			port->tx_data_list[j].urb =
+					usb_alloc_urb(0, GFP_KERNEL);
+			if (!port->tx_data_list[j].urb) {
+				mwifiex_dbg(adapter, ERROR,
+					    "urb allocation failed\n");
+				return -ENOMEM;
+			}
 		}
 	}
 
@@ -736,15 +770,89 @@
 	return ret;
 }
 
+static void mwifiex_usb_port_resync(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = adapter->card;
+	u8 active_port = MWIFIEX_USB_EP_DATA;
+	struct mwifiex_private *priv = NULL;
+	int i;
+
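+	/*
+	 * Multi-channel started: interfaces without an active connection
+	 * fall back to the default data EP and both tx ports unblock.
+	 * Multi-channel over: collapse every interface onto the port of
+	 * the surviving active connection and block the other port.
+	 */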
+	if (adapter->usb_mc_status) {
+		for (i = 0; i < adapter->priv_num; i++) {
+			priv = adapter->priv[i];
+			if (!priv)
+				continue;
+			if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
+			     !priv->bss_started) ||
+			    (priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
+			     !priv->media_connected))
+				priv->usb_port = MWIFIEX_USB_EP_DATA;
+		}
+		for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++)
+			card->port[i].block_status = false;
+	} else {
+		for (i = 0; i < adapter->priv_num; i++) {
+			priv = adapter->priv[i];
+			if (!priv)
+				continue;
+			if ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP &&
+			     priv->bss_started) ||
+			    (priv->bss_role == MWIFIEX_BSS_ROLE_STA &&
+			     priv->media_connected)) {
+				active_port = priv->usb_port;
+				break;
+			}
+		}
+		for (i = 0; i < adapter->priv_num; i++) {
+			priv = adapter->priv[i];
+			if (priv)
+				priv->usb_port = active_port;
+		}
+		for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++) {
+			if (active_port == card->port[i].tx_data_ep)
+				card->port[i].block_status = false;
+			else
+				card->port[i].block_status = true;
+		}
+	}
+}
+
+static bool mwifiex_usb_is_port_ready(struct mwifiex_private *priv)
+{
+	struct usb_card_rec *card = priv->adapter->card;
+	int idx;
+
+	for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
+		if (priv->usb_port == card->port[idx].tx_data_ep)
+			return !card->port[idx].block_status;
+	}
+
+	return false;
+}
+
+static inline u8 mwifiex_usb_data_sent(struct mwifiex_adapter *adapter)
+{
+	struct usb_card_rec *card = adapter->card;
+	int i;
+
+	for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++)
+		if (!card->port[i].block_status)
+			return false;
+
+	return true;
+}
+
 /* This function writes a command/data packet to the card. */
 static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
 				    struct sk_buff *skb,
 				    struct mwifiex_tx_param *tx_param)
 {
 	struct usb_card_rec *card = adapter->card;
-	struct urb_context *context;
+	struct urb_context *context = NULL;
+	struct usb_tx_data_port *port = NULL;
 	u8 *data = (u8 *)skb->data;
 	struct urb *tx_urb;
+	int idx, ret;
 
 	if (adapter->is_suspended) {
 		mwifiex_dbg(adapter, ERROR,
@@ -757,19 +865,31 @@
 		return -1;
 	}
 
-	if (ep == card->tx_data_ep &&
-	    atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) {
-		return -EBUSY;
-	}
-
 	mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
 
 	if (ep == card->tx_cmd_ep) {
 		context = &card->tx_cmd;
 	} else {
-		if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB)
-			card->tx_data_ix = 0;
-		context = &card->tx_data_list[card->tx_data_ix++];
+		for (idx = 0; idx < MWIFIEX_TX_DATA_PORT; idx++) {
+			if (ep == card->port[idx].tx_data_ep) {
+				port = &card->port[idx];
+				if (atomic_read(&port->tx_data_urb_pending)
+				    >= MWIFIEX_TX_DATA_URB) {
+					port->block_status = true;
+					ret = -EBUSY;
+					goto done;
+				}
+				if (port->tx_data_ix >= MWIFIEX_TX_DATA_URB)
+					port->tx_data_ix = 0;
+				context =
+					&port->tx_data_list[port->tx_data_ix++];
+				break;
+			}
+		}
+		if (!port) {
+			mwifiex_dbg(adapter, ERROR, "Wrong usb tx data port\n");
+			return -1;
+		}
 	}
 
 	context->adapter = adapter;
@@ -786,7 +906,7 @@
 	if (ep == card->tx_cmd_ep)
 		atomic_inc(&card->tx_cmd_urb_pending);
 	else
-		atomic_inc(&card->tx_data_urb_pending);
+		atomic_inc(&port->tx_data_urb_pending);
 
 	if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
 		mwifiex_dbg(adapter, ERROR,
@@ -794,22 +914,32 @@
 		if (ep == card->tx_cmd_ep) {
 			atomic_dec(&card->tx_cmd_urb_pending);
 		} else {
-			atomic_dec(&card->tx_data_urb_pending);
-			if (card->tx_data_ix)
-				card->tx_data_ix--;
+			atomic_dec(&port->tx_data_urb_pending);
+			port->block_status = false;
+			if (port->tx_data_ix)
+				port->tx_data_ix--;
 			else
-				card->tx_data_ix = MWIFIEX_TX_DATA_URB;
+				port->tx_data_ix = MWIFIEX_TX_DATA_URB;
 		}
 
 		return -1;
 	} else {
-		if (ep == card->tx_data_ep &&
-		    atomic_read(&card->tx_data_urb_pending) ==
-							MWIFIEX_TX_DATA_URB)
-			return -ENOSR;
+		if (ep != card->tx_cmd_ep &&
+		    atomic_read(&port->tx_data_urb_pending) ==
+							MWIFIEX_TX_DATA_URB) {
+			port->block_status = true;
+			ret = -ENOSR;
+			goto done;
+		}
 	}
 
 	return -EINPROGRESS;
+
+done:
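+	/* data path: recompute the adapter-wide flow-control flag;
+	 * data_sent stays true only while every tx data port is blocked */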
+	if (ep != card->tx_cmd_ep)
+		adapter->data_sent = mwifiex_usb_data_sent(adapter);
+
+	return ret;
 }
 
 /* This function registers the usb device and initializes parameters. */
@@ -827,12 +957,6 @@
 		strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
 		adapter->ext_scan = true;
 		break;
-	case USB8897_PID_1:
-	case USB8897_PID_2:
-		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
-		strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
-		adapter->ext_scan = true;
-		break;
 	case USB8766_PID_1:
 	case USB8766_PID_2:
 		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
@@ -853,6 +977,9 @@
 		break;
 	}
 
+	adapter->usb_mc_status = false;
+	adapter->usb_mc_setup = false;
+
 	return 0;
 }
 
@@ -1082,6 +1209,8 @@
 	.event_complete =	mwifiex_usb_cmd_event_complete,
 	.host_to_card =		mwifiex_usb_host_to_card,
 	.submit_rem_rx_urbs =	mwifiex_usb_submit_rem_rx_urbs,
+	.multi_port_resync =	mwifiex_usb_port_resync,
+	.is_port_ready =	mwifiex_usb_is_port_ready,
 };
 
 /* This function initializes the USB driver module.
@@ -1135,5 +1264,4 @@
 MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
-MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
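
Several usb.c hunks above repeat the same endpoint-to-port lookup over
card->port[]. A hypothetical helper capturing that pattern
(mwifiex_usb_get_port() is not part of this patch, purely illustrative):

	static struct usb_tx_data_port *
	mwifiex_usb_get_port(struct usb_card_rec *card, u8 ep)
	{
		int i;

		for (i = 0; i < MWIFIEX_TX_DATA_PORT; i++)
			if (card->port[i].tx_data_ep == ep)
				return &card->port[i];

		return NULL;	/* not a known tx data endpoint */
	}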
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index f0051f8..b4e9246 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -28,11 +28,9 @@
 #define USB8766_PID_2		0x2042
 #define USB8797_PID_1		0x2043
 #define USB8797_PID_2		0x2044
-#define USB8897_PID_1		0x2045
-#define USB8897_PID_2		0x2046
 #define USB8801_PID_1		0x2049
 #define USB8801_PID_2		0x204a
-#define USB8997_PID_1		0x204d
+#define USB8997_PID_1		0x2052
 #define USB8997_PID_2		0x204e
 
 
@@ -40,6 +38,7 @@
 #define USB8XXX_FW_READY	2
 #define USB8XXX_FW_MAX_RETRY	3
 
+#define MWIFIEX_TX_DATA_PORT	2
 #define MWIFIEX_TX_DATA_URB	6
 #define MWIFIEX_RX_DATA_URB	6
 #define MWIFIEX_USB_TIMEOUT	100
@@ -47,7 +46,6 @@
 #define USB8766_DEFAULT_FW_NAME	"mrvl/usb8766_uapsta.bin"
 #define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME	"mrvl/usb8801_uapsta.bin"
-#define USB8897_DEFAULT_FW_NAME	"mrvl/usb8897_uapsta.bin"
 #define USB8997_DEFAULT_FW_NAME	"mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE	620
@@ -64,6 +62,14 @@
 	u8 ep;
 };
 
+struct usb_tx_data_port {
+	u8 tx_data_ep;
+	u8 block_status;
+	atomic_t tx_data_urb_pending;
+	int tx_data_ix;
+	struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
+};
+
 struct usb_card_rec {
 	struct mwifiex_adapter *adapter;
 	struct usb_device *udev;
@@ -75,14 +81,12 @@
 	u8 usb_boot_state;
 	u8 rx_data_ep;
 	atomic_t rx_data_urb_pending;
-	u8 tx_data_ep;
 	u8 tx_cmd_ep;
-	atomic_t tx_data_urb_pending;
 	atomic_t tx_cmd_urb_pending;
 	int bulk_out_maxpktsize;
 	struct urb_context tx_cmd;
-	int tx_data_ix;
-	struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
+	u8 mc_resync_flag;
+	struct usb_tx_data_port port[MWIFIEX_TX_DATA_PORT];
 };
 
 struct fw_header {
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 173d366..acccd67 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -117,22 +117,15 @@
  */
 static u8 mwifiex_get_random_ba_threshold(void)
 {
-	u32 sec, usec;
-	struct timeval ba_tstamp;
-	u8 ba_threshold;
-
+	u64 ns;
 	/* set up ba_packet_threshold here, a random number in the range
 	 * [BA_SETUP_PACKET_OFFSET,
 	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
 	 */
+	ns = ktime_get_ns();
+	ns += (ns >> 32) + (ns >> 16);
 
-	do_gettimeofday(&ba_tstamp);
-	sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16);
-	usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16);
-	ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD)
-						      + BA_SETUP_PACKET_OFFSET;
-
-	return ba_threshold;
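+	/*
+	 * (u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD lies in
+	 * [0, BA_SETUP_MAX_PACKET_THRESHOLD - 1], so the returned value
+	 * always falls in the window described above.
+	 */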
+	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
 }
 
 /*
@@ -160,7 +153,6 @@
 		ra_list->tdls_link = false;
 		ra_list->ba_status = BA_SETUP_NONE;
 		ra_list->amsdu_in_ampdu = false;
-		ra_list->tx_paused = false;
 		if (!mwifiex_queuing_ra_based(priv)) {
 			if (mwifiex_is_tdls_link_setup
 				(mwifiex_get_tdls_link_status(priv, ra))) {
@@ -173,6 +165,8 @@
 		} else {
 			spin_lock_irqsave(&priv->sta_list_spinlock, flags);
 			node = mwifiex_get_sta_entry(priv, ra);
+			if (node)
+				ra_list->tx_paused = node->tx_pause;
 			ra_list->is_11n_enabled =
 				      mwifiex_is_sta_11n_enabled(priv, node);
 			if (ra_list->is_11n_enabled)
@@ -451,7 +445,21 @@
 
 int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
 {
-	return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+	struct mwifiex_private *priv;
+	int i;
+
+	for (i = 0; i < adapter->priv_num; i++) {
+		priv = adapter->priv[i];
+		if (!priv)
+			continue;
+		if (adapter->if_ops.is_port_ready &&
+		    !adapter->if_ops.is_port_ready(priv))
+			continue;
+		if (!skb_queue_empty(&priv->bypass_txq))
+			return false;
+	}
+
+	return true;
 }
 
 /*
@@ -465,9 +473,14 @@
 
 	for (i = 0; i < adapter->priv_num; ++i) {
 		priv = adapter->priv[i];
-		if (priv && !priv->port_open)
+		if (!priv)
 			continue;
-		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
+		if (!priv->port_open)
+			continue;
+		if (adapter->if_ops.is_port_ready &&
+		    !adapter->if_ops.is_port_ready(priv))
+			continue;
+		if (atomic_read(&priv->wmm.tx_pkts_queued))
 			return false;
 	}
 
@@ -671,7 +684,7 @@
 			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
 				continue;
 
-			if (ra_list && ra_list->tx_paused != tx_pause) {
+			if (ra_list->tx_paused != tx_pause) {
 				pkt_cnt += ra_list->total_pkt_count;
 				ra_list->tx_paused = tx_pause;
 				if (tx_pause)
@@ -737,7 +750,11 @@
 		if (!ra_list)
 			continue;
 		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
-		atomic_sub(ra_list->total_pkt_count, &priv->wmm.tx_pkts_queued);
+		if (ra_list->tx_paused)
+			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
+		else
+			atomic_sub(ra_list->total_pkt_count,
+				   &priv->wmm.tx_pkts_queued);
 		list_del(&ra_list->list);
 		kfree(ra_list);
 	}
@@ -1086,6 +1103,10 @@
 			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
 				continue;
 
+			if (adapter->if_ops.is_port_ready &&
+			    !adapter->if_ops.is_port_ready(priv_tmp))
+				continue;
+
 			/* iterate over the WMM queues of the BSS */
 			hqp = &priv_tmp->wmm.highest_queued_prio;
 			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
@@ -1321,8 +1342,7 @@
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
 
 	if (adapter->iface_type == MWIFIEX_USB) {
-		adapter->data_sent = true;
-		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
+		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
 						   skb, NULL);
 	} else {
 		tx_param.next_pkt_len =
@@ -1351,15 +1371,11 @@
 				       ra_list_flags);
 		break;
 	case -1:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
 		adapter->dbg.num_tx_host_to_card_failure++;
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
 		break;
 	case -EINPROGRESS:
-		if (adapter->iface_type != MWIFIEX_PCIE)
-			adapter->data_sent = false;
 		break;
 	case 0:
 		mwifiex_write_data_complete(adapter, skb, 0, ret);
@@ -1467,6 +1483,13 @@
 	for (i = 0; i < adapter->priv_num; ++i) {
 		priv = adapter->priv[i];
 
+		if (!priv)
+			continue;
+
+		if (adapter->if_ops.is_port_ready &&
+		    !adapter->if_ops.is_port_ready(priv))
+			continue;
+
 		if (skb_queue_empty(&priv->bypass_txq))
 			continue;
 
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9420fc6..30e3aaa 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5423,7 +5423,7 @@
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		   enum ieee80211_ampdu_mlme_action action,
 		   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-		   u8 buf_size)
+		   u8 buf_size, bool amsdu)
 {
 
 	int i, rc = 0;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 26a57d7..f2cd513 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1576,6 +1576,7 @@
 				ezusb_hard_reset, NULL);
 	if (!priv) {
 		err("Couldn't allocate orinocodev");
+		retval = -ENOMEM;
 		goto exit;
 	}
 
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
new file mode 100644
index 0000000..9c78deb
--- /dev/null
+++ b/drivers/net/wireless/realtek/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Linux Wireless network device drivers for Realtek units
+#
+
+obj-$(CONFIG_RTL8180)		+= rtl818x/
+obj-$(CONFIG_RTL8187)		+= rtl818x/
+obj-$(CONFIG_RTLWIFI)		+= rtlwifi/
+obj-$(CONFIG_RTL8XXXU)		+= rtl8xxxu/
+
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/realtek/rtl818x/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtl818x/Kconfig
rename to drivers/net/wireless/realtek/rtl818x/Kconfig
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/realtek/rtl818x/Makefile
similarity index 100%
rename from drivers/net/wireless/rtl818x/Makefile
rename to drivers/net/wireless/realtek/rtl818x/Makefile
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
similarity index 69%
rename from drivers/net/wireless/rtl818x/rtl8180/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 21005bd..2966681 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_RTL8180)	+= rtl818x_pci.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/grf5101.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/max2820.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8180/sa2400.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
similarity index 62%
rename from drivers/net/wireless/rtl818x/rtl8187/Makefile
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 7b62992..ff07491 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -2,4 +2,4 @@
 
 obj-$(CONFIG_RTL8187)	+= rtl8187.o
 
-ccflags-y += -Idrivers/net/wireless/rtl818x
+ccflags-y += -Idrivers/net/wireless/realtek/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/dev.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/leds.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rfkill.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rfkill.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
rename to drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/realtek/rtl818x/rtl818x.h
similarity index 100%
rename from drivers/net/wireless/rtl818x/rtl818x.h
rename to drivers/net/wireless/realtek/rtl818x/rtl818x.h
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
new file mode 100644
index 0000000..dd4d626
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -0,0 +1,34 @@
+#
+# RTL8XXXU Wireless LAN device configuration
+#
+config RTL8XXXU
+	tristate "RTL8723AU/RTL8188[CR]U/RTL819[12]CU (mac80211) support"
+	depends on MAC80211 && USB
+	---help---
+	  This is an alternative driver for various Realtek RTL8XXX
+	  parts written to utilize the Linux mac80211 stack.
+	  The driver is known to work with a number of RTL8723AU,
+	  RTL8188CU, RTL8188RU, RTL8191CU, and RTL8192CU devices.
+
+	  This driver is under development and has a limited feature
+	  set. In particular, it does not yet support 40MHz channels
+	  or power management. However, it should have a smaller
+	  memory footprint than the vendor drivers and benefits
+	  from the in-kernel mac80211 stack.
+
+	  It can coexist with drivers from drivers/staging/rtl8723au,
+	  drivers/staging/rtl8192u, and drivers/net/wireless/rtlwifi,
+	  but you will need to control which module you wish to load.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called rtl8xxxu.  If unsure, say N.
+
+config RTL8XXXU_UNTESTED
+	bool "Include support for untested Realtek 8xxx USB devices (EXPERIMENTAL)"
+	depends on RTL8XXXU
+	---help---
+	  This option enables detection of Realtek 8723/8188/8191/8192 WiFi
+	  USB devices which have not been tested directly by the driver
+	  author or reported to be working by third parties.
+
+	  Please report your results!
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
new file mode 100644
index 0000000..5dea3bb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RTL8XXXU)	+= rtl8xxxu.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
new file mode 100644
index 0000000..6aed923
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
@@ -0,0 +1,5993 @@
+/*
+ * RTL8XXXU mac80211 USB driver
+ *
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#define DRIVER_NAME "rtl8xxxu"
+
+static int rtl8xxxu_debug;
+static bool rtl8xxxu_ht40_2g;
+
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
+
+module_param_named(debug, rtl8xxxu_debug, int, 0600);
+MODULE_PARM_DESC(debug, "Set debug mask");
+module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600);
+MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band");
+
+#define USB_VENDOR_ID_REALTEK		0x0bda
+/* RX buffer size: at least IEEE80211_MAX_FRAME_LEN */
+#define RTL_RX_BUFFER_SIZE		IEEE80211_MAX_FRAME_LEN
+#define RTL8XXXU_RX_URBS		32
+#define RTL8XXXU_RX_URB_PENDING_WATER	8
+#define RTL8XXXU_TX_URBS		64
+#define RTL8XXXU_TX_URB_LOW_WATER	25
+#define RTL8XXXU_TX_URB_HIGH_WATER	32
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb);
+
+static struct ieee80211_rate rtl8xxxu_rates[] = {
+	{ .bitrate = 10, .hw_value = DESC_RATE_1M, .flags = 0 },
+	{ .bitrate = 20, .hw_value = DESC_RATE_2M, .flags = 0 },
+	{ .bitrate = 55, .hw_value = DESC_RATE_5_5M, .flags = 0 },
+	{ .bitrate = 110, .hw_value = DESC_RATE_11M, .flags = 0 },
+	{ .bitrate = 60, .hw_value = DESC_RATE_6M, .flags = 0 },
+	{ .bitrate = 90, .hw_value = DESC_RATE_9M, .flags = 0 },
+	{ .bitrate = 120, .hw_value = DESC_RATE_12M, .flags = 0 },
+	{ .bitrate = 180, .hw_value = DESC_RATE_18M, .flags = 0 },
+	{ .bitrate = 240, .hw_value = DESC_RATE_24M, .flags = 0 },
+	{ .bitrate = 360, .hw_value = DESC_RATE_36M, .flags = 0 },
+	{ .bitrate = 480, .hw_value = DESC_RATE_48M, .flags = 0 },
+	{ .bitrate = 540, .hw_value = DESC_RATE_54M, .flags = 0 },
+};
+
+static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+	  .hw_value = 1, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+	  .hw_value = 2, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+	  .hw_value = 3, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+	  .hw_value = 4, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+	  .hw_value = 5, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+	  .hw_value = 6, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+	  .hw_value = 7, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+	  .hw_value = 8, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+	  .hw_value = 9, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+	  .hw_value = 10, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+	  .hw_value = 11, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+	  .hw_value = 12, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+	  .hw_value = 13, .max_power = 30 },
+	{ .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+	  .hw_value = 14, .max_power = 30 }
+};
+
+static struct ieee80211_supported_band rtl8xxxu_supported_band = {
+	.channels = rtl8xxxu_channels_2g,
+	.n_channels = ARRAY_SIZE(rtl8xxxu_channels_2g),
+	.bitrates = rtl8xxxu_rates,
+	.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
+};
+
+static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+	{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+	{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+	{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+	{0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+	{0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+	{0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+	{0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+	{0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+	{0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+	{0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+	{0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+	{0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+	{0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+	{0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+	{0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+	{0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+	{0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+	{0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+	{0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+	{0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+	{0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+	{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
+	{0x800, 0x80040000}, {0x804, 0x00000003},
+	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+	{0x810, 0x10001331}, {0x814, 0x020c3d10},
+	{0x818, 0x02200385}, {0x81c, 0x00000000},
+	{0x820, 0x01000100}, {0x824, 0x00390004},
+	{0x828, 0x00000000}, {0x82c, 0x00000000},
+	{0x830, 0x00000000}, {0x834, 0x00000000},
+	{0x838, 0x00000000}, {0x83c, 0x00000000},
+	{0x840, 0x00010000}, {0x844, 0x00000000},
+	{0x848, 0x00000000}, {0x84c, 0x00000000},
+	{0x850, 0x00000000}, {0x854, 0x00000000},
+	{0x858, 0x569a569a}, {0x85c, 0x001b25a4},
+	{0x860, 0x66f60110}, {0x864, 0x061f0130},
+	{0x868, 0x00000000}, {0x86c, 0x32323200},
+	{0x870, 0x07000760}, {0x874, 0x22004000},
+	{0x878, 0x00000808}, {0x87c, 0x00000000},
+	{0x880, 0xc0083070}, {0x884, 0x000004d5},
+	{0x888, 0x00000000}, {0x88c, 0xccc000c0},
+	{0x890, 0x00000800}, {0x894, 0xfffffffe},
+	{0x898, 0x40302010}, {0x89c, 0x00706050},
+	{0x900, 0x00000000}, {0x904, 0x00000023},
+	{0x908, 0x00000000}, {0x90c, 0x81121111},
+	{0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+	{0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+	{0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+	{0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+	{0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+	{0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+	{0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+	{0xa78, 0x00000900},
+	{0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+	{0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+	{0xc10, 0x08800000}, {0xc14, 0x40000100},
+	{0xc18, 0x08800000}, {0xc1c, 0x40000100},
+	{0xc20, 0x00000000}, {0xc24, 0x00000000},
+	{0xc28, 0x00000000}, {0xc2c, 0x00000000},
+	{0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+	{0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+	{0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+	{0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+	{0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+	{0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+	{0xc60, 0x00000000}, {0xc64, 0x7112848b},
+	{0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+	{0xc70, 0x2c7f000d}, {0xc74, 0x018610db},
+	{0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+	{0xc80, 0x40000100}, {0xc84, 0x20f60000},
+	{0xc88, 0x40000100}, {0xc8c, 0x20200000},
+	{0xc90, 0x00121820}, {0xc94, 0x00000000},
+	{0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+	{0xca0, 0x00000000}, {0xca4, 0x00000080},
+	{0xca8, 0x00000000}, {0xcac, 0x00000000},
+	{0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+	{0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+	{0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+	{0xcc8, 0x00000000}, {0xccc, 0x00000000},
+	{0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+	{0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+	{0xce0, 0x00222222}, {0xce4, 0x00000000},
+	{0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+	{0xd00, 0x00080740}, {0xd04, 0x00020401},
+	{0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+	{0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+	{0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+	{0xd30, 0x00000000}, {0xd34, 0x80608000},
+	{0xd38, 0x00000000}, {0xd3c, 0x00027293},
+	{0xd40, 0x00000000}, {0xd44, 0x00000000},
+	{0xd48, 0x00000000}, {0xd4c, 0x00000000},
+	{0xd50, 0x6437140a}, {0xd54, 0x00000000},
+	{0xd58, 0x00000000}, {0xd5c, 0x30032064},
+	{0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+	{0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+	{0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+	{0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+	{0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+	{0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+	{0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+	{0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+	{0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+	{0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+	{0xe44, 0x01004800}, {0xe48, 0xfb000000},
+	{0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+	{0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+	{0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+	{0xe68, 0x001b25a4}, {0xe6c, 0x631b25a0},
+	{0xe70, 0x631b25a0}, {0xe74, 0x081b25a0},
+	{0xe78, 0x081b25a0}, {0xe7c, 0x081b25a0},
+	{0xe80, 0x081b25a0}, {0xe84, 0x631b25a0},
+	{0xe88, 0x081b25a0}, {0xe8c, 0x631b25a0},
+	{0xed0, 0x631b25a0}, {0xed4, 0x631b25a0},
+	{0xed8, 0x631b25a0}, {0xedc, 0x001b25a0},
+	{0xee0, 0x001b25a0}, {0xeec, 0x6b1b25a0},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
+	{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+	{0x800, 0x80040002}, {0x804, 0x00000003},
+	{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+	{0x810, 0x10000330}, {0x814, 0x020c3d10},
+	{0x818, 0x02200385}, {0x81c, 0x00000000},
+	{0x820, 0x01000100}, {0x824, 0x00390004},
+	{0x828, 0x01000100}, {0x82c, 0x00390004},
+	{0x830, 0x27272727}, {0x834, 0x27272727},
+	{0x838, 0x27272727}, {0x83c, 0x27272727},
+	{0x840, 0x00010000}, {0x844, 0x00010000},
+	{0x848, 0x27272727}, {0x84c, 0x27272727},
+	{0x850, 0x00000000}, {0x854, 0x00000000},
+	{0x858, 0x569a569a}, {0x85c, 0x0c1b25a4},
+	{0x860, 0x66e60230}, {0x864, 0x061f0130},
+	{0x868, 0x27272727}, {0x86c, 0x2b2b2b27},
+	{0x870, 0x07000700}, {0x874, 0x22184000},
+	{0x878, 0x08080808}, {0x87c, 0x00000000},
+	{0x880, 0xc0083070}, {0x884, 0x000004d5},
+	{0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+	{0x890, 0x00000800}, {0x894, 0xfffffffe},
+	{0x898, 0x40302010}, {0x89c, 0x00706050},
+	{0x900, 0x00000000}, {0x904, 0x00000023},
+	{0x908, 0x00000000}, {0x90c, 0x81121313},
+	{0xa00, 0x00d047c8}, {0xa04, 0x80ff000c},
+	{0xa08, 0x8c838300}, {0xa0c, 0x2e68120f},
+	{0xa10, 0x9500bb78}, {0xa14, 0x11144028},
+	{0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+	{0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+	{0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+	{0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+	{0xc00, 0x48071d40}, {0xc04, 0x03a05633},
+	{0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+	{0xc10, 0x08800000}, {0xc14, 0x40000100},
+	{0xc18, 0x08800000}, {0xc1c, 0x40000100},
+	{0xc20, 0x00000000}, {0xc24, 0x00000000},
+	{0xc28, 0x00000000}, {0xc2c, 0x00000000},
+	{0xc30, 0x69e9ac44}, {0xc34, 0x469652cf},
+	{0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+	{0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+	{0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+	{0xc50, 0x69543420}, {0xc54, 0x43bc0094},
+	{0xc58, 0x69543420}, {0xc5c, 0x433c0094},
+	{0xc60, 0x00000000}, {0xc64, 0x5116848b},
+	{0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+	{0xc70, 0x2c7f000d}, {0xc74, 0x2186115b},
+	{0xc78, 0x0000001f}, {0xc7c, 0x00b99612},
+	{0xc80, 0x40000100}, {0xc84, 0x20f60000},
+	{0xc88, 0x40000100}, {0xc8c, 0xa0e40000},
+	{0xc90, 0x00121820}, {0xc94, 0x00000000},
+	{0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+	{0xca0, 0x00000000}, {0xca4, 0x00000080},
+	{0xca8, 0x00000000}, {0xcac, 0x00000000},
+	{0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+	{0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+	{0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+	{0xcc8, 0x00000000}, {0xccc, 0x00000000},
+	{0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+	{0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+	{0xce0, 0x00222222}, {0xce4, 0x00000000},
+	{0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+	{0xd00, 0x00080740}, {0xd04, 0x00020403},
+	{0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+	{0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+	{0xd18, 0x7a8f5b6b}, {0xd2c, 0xcc979975},
+	{0xd30, 0x00000000}, {0xd34, 0x80608000},
+	{0xd38, 0x00000000}, {0xd3c, 0x00027293},
+	{0xd40, 0x00000000}, {0xd44, 0x00000000},
+	{0xd48, 0x00000000}, {0xd4c, 0x00000000},
+	{0xd50, 0x6437140a}, {0xd54, 0x00000000},
+	{0xd58, 0x00000000}, {0xd5c, 0x30032064},
+	{0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+	{0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+	{0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+	{0xd78, 0x000e3c24}, {0xe00, 0x2a2a2a2a},
+	{0xe04, 0x2a2a2a2a}, {0xe08, 0x03902a2a},
+	{0xe10, 0x2a2a2a2a}, {0xe14, 0x2a2a2a2a},
+	{0xe18, 0x2a2a2a2a}, {0xe1c, 0x2a2a2a2a},
+	{0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+	{0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+	{0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+	{0xe44, 0x01004800}, {0xe48, 0xfb000000},
+	{0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+	{0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+	{0xe5c, 0x28160d05}, {0xe60, 0x00000010},
+	{0xe68, 0x001b25a4}, {0xe6c, 0x63db25a4},
+	{0xe70, 0x63db25a4}, {0xe74, 0x0c1b25a4},
+	{0xe78, 0x0c1b25a4}, {0xe7c, 0x0c1b25a4},
+	{0xe80, 0x0c1b25a4}, {0xe84, 0x63db25a4},
+	{0xe88, 0x0c1b25a4}, {0xe8c, 0x63db25a4},
+	{0xed0, 0x63db25a4}, {0xed4, 0x63db25a4},
+	{0xed8, 0x63db25a4}, {0xedc, 0x001b25a4},
+	{0xee0, 0x001b25a4}, {0xeec, 0x6fdb25a4},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8188ru_phy_1t_highpa_table[] = {
+	{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
+	{0x040, 0x000c0004}, {0x800, 0x80040000},
+	{0x804, 0x00000001}, {0x808, 0x0000fc00},
+	{0x80c, 0x0000000a}, {0x810, 0x10005388},
+	{0x814, 0x020c3d10}, {0x818, 0x02200385},
+	{0x81c, 0x00000000}, {0x820, 0x01000100},
+	{0x824, 0x00390204}, {0x828, 0x00000000},
+	{0x82c, 0x00000000}, {0x830, 0x00000000},
+	{0x834, 0x00000000}, {0x838, 0x00000000},
+	{0x83c, 0x00000000}, {0x840, 0x00010000},
+	{0x844, 0x00000000}, {0x848, 0x00000000},
+	{0x84c, 0x00000000}, {0x850, 0x00000000},
+	{0x854, 0x00000000}, {0x858, 0x569a569a},
+	{0x85c, 0x001b25a4}, {0x860, 0x66e60230},
+	{0x864, 0x061f0130}, {0x868, 0x00000000},
+	{0x86c, 0x20202000}, {0x870, 0x03000300},
+	{0x874, 0x22004000}, {0x878, 0x00000808},
+	{0x87c, 0x00ffc3f1}, {0x880, 0xc0083070},
+	{0x884, 0x000004d5}, {0x888, 0x00000000},
+	{0x88c, 0xccc000c0}, {0x890, 0x00000800},
+	{0x894, 0xfffffffe}, {0x898, 0x40302010},
+	{0x89c, 0x00706050}, {0x900, 0x00000000},
+	{0x904, 0x00000023}, {0x908, 0x00000000},
+	{0x90c, 0x81121111}, {0xa00, 0x00d047c8},
+	{0xa04, 0x80ff000c}, {0xa08, 0x8c838300},
+	{0xa0c, 0x2e68120f}, {0xa10, 0x9500bb78},
+	{0xa14, 0x11144028}, {0xa18, 0x00881117},
+	{0xa1c, 0x89140f00}, {0xa20, 0x15160000},
+	{0xa24, 0x070b0f12}, {0xa28, 0x00000104},
+	{0xa2c, 0x00d30000}, {0xa70, 0x101fbf00},
+	{0xa74, 0x00000007}, {0xc00, 0x48071d40},
+	{0xc04, 0x03a05611}, {0xc08, 0x000000e4},
+	{0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+	{0xc14, 0x40000100}, {0xc18, 0x08800000},
+	{0xc1c, 0x40000100}, {0xc20, 0x00000000},
+	{0xc24, 0x00000000}, {0xc28, 0x00000000},
+	{0xc2c, 0x00000000}, {0xc30, 0x69e9ac44},
+	{0xc34, 0x469652cf}, {0xc38, 0x49795994},
+	{0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+	{0xc44, 0x000100b7}, {0xc48, 0xec020107},
+	{0xc4c, 0x007f037f}, {0xc50, 0x6954342e},
+	{0xc54, 0x43bc0094}, {0xc58, 0x6954342f},
+	{0xc5c, 0x433c0094}, {0xc60, 0x00000000},
+	{0xc64, 0x5116848b}, {0xc68, 0x47c00bff},
+	{0xc6c, 0x00000036}, {0xc70, 0x2c46000d},
+	{0xc74, 0x018610db}, {0xc78, 0x0000001f},
+	{0xc7c, 0x00b91612}, {0xc80, 0x24000090},
+	{0xc84, 0x20f60000}, {0xc88, 0x24000090},
+	{0xc8c, 0x20200000}, {0xc90, 0x00121820},
+	{0xc94, 0x00000000}, {0xc98, 0x00121820},
+	{0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+	{0xca4, 0x00000080}, {0xca8, 0x00000000},
+	{0xcac, 0x00000000}, {0xcb0, 0x00000000},
+	{0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+	{0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+	{0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+	{0xccc, 0x00000000}, {0xcd0, 0x00000000},
+	{0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+	{0xcdc, 0x00766932}, {0xce0, 0x00222222},
+	{0xce4, 0x00000000}, {0xce8, 0x37644302},
+	{0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+	{0xd04, 0x00020401}, {0xd08, 0x0000907f},
+	{0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+	{0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+	{0xd2c, 0xcc979975}, {0xd30, 0x00000000},
+	{0xd34, 0x80608000}, {0xd38, 0x00000000},
+	{0xd3c, 0x00027293}, {0xd40, 0x00000000},
+	{0xd44, 0x00000000}, {0xd48, 0x00000000},
+	{0xd4c, 0x00000000}, {0xd50, 0x6437140a},
+	{0xd54, 0x00000000}, {0xd58, 0x00000000},
+	{0xd5c, 0x30032064}, {0xd60, 0x4653de68},
+	{0xd64, 0x04518a3c}, {0xd68, 0x00002101},
+	{0xd6c, 0x2a201c16}, {0xd70, 0x1812362e},
+	{0xd74, 0x322c2220}, {0xd78, 0x000e3c24},
+	{0xe00, 0x24242424}, {0xe04, 0x24242424},
+	{0xe08, 0x03902024}, {0xe10, 0x24242424},
+	{0xe14, 0x24242424}, {0xe18, 0x24242424},
+	{0xe1c, 0x24242424}, {0xe28, 0x00000000},
+	{0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+	{0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+	{0xe40, 0x01007c00}, {0xe44, 0x01004800},
+	{0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+	{0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+	{0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+	{0xe60, 0x00000008}, {0xe68, 0x001b25a4},
+	{0xe6c, 0x631b25a0}, {0xe70, 0x631b25a0},
+	{0xe74, 0x081b25a0}, {0xe78, 0x081b25a0},
+	{0xe7c, 0x081b25a0}, {0xe80, 0x081b25a0},
+	{0xe84, 0x631b25a0}, {0xe88, 0x081b25a0},
+	{0xe8c, 0x631b25a0}, {0xed0, 0x631b25a0},
+	{0xed4, 0x631b25a0}, {0xed8, 0x631b25a0},
+	{0xedc, 0x001b25a0}, {0xee0, 0x001b25a0},
+	{0xeec, 0x6b1b25a0}, {0xee8, 0x31555448},
+	{0xf14, 0x00000003}, {0xf4c, 0x00000000},
+	{0xf00, 0x00000300},
+	{0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_standard_table[] = {
+	{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+	{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+	{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+	{0xc78, 0x7a060001}, {0xc78, 0x79070001},
+	{0xc78, 0x78080001}, {0xc78, 0x77090001},
+	{0xc78, 0x760a0001}, {0xc78, 0x750b0001},
+	{0xc78, 0x740c0001}, {0xc78, 0x730d0001},
+	{0xc78, 0x720e0001}, {0xc78, 0x710f0001},
+	{0xc78, 0x70100001}, {0xc78, 0x6f110001},
+	{0xc78, 0x6e120001}, {0xc78, 0x6d130001},
+	{0xc78, 0x6c140001}, {0xc78, 0x6b150001},
+	{0xc78, 0x6a160001}, {0xc78, 0x69170001},
+	{0xc78, 0x68180001}, {0xc78, 0x67190001},
+	{0xc78, 0x661a0001}, {0xc78, 0x651b0001},
+	{0xc78, 0x641c0001}, {0xc78, 0x631d0001},
+	{0xc78, 0x621e0001}, {0xc78, 0x611f0001},
+	{0xc78, 0x60200001}, {0xc78, 0x49210001},
+	{0xc78, 0x48220001}, {0xc78, 0x47230001},
+	{0xc78, 0x46240001}, {0xc78, 0x45250001},
+	{0xc78, 0x44260001}, {0xc78, 0x43270001},
+	{0xc78, 0x42280001}, {0xc78, 0x41290001},
+	{0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+	{0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+	{0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+	{0xc78, 0x21300001}, {0xc78, 0x20310001},
+	{0xc78, 0x06320001}, {0xc78, 0x05330001},
+	{0xc78, 0x04340001}, {0xc78, 0x03350001},
+	{0xc78, 0x02360001}, {0xc78, 0x01370001},
+	{0xc78, 0x00380001}, {0xc78, 0x00390001},
+	{0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+	{0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+	{0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+	{0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+	{0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+	{0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+	{0xc78, 0x7a460001}, {0xc78, 0x79470001},
+	{0xc78, 0x78480001}, {0xc78, 0x77490001},
+	{0xc78, 0x764a0001}, {0xc78, 0x754b0001},
+	{0xc78, 0x744c0001}, {0xc78, 0x734d0001},
+	{0xc78, 0x724e0001}, {0xc78, 0x714f0001},
+	{0xc78, 0x70500001}, {0xc78, 0x6f510001},
+	{0xc78, 0x6e520001}, {0xc78, 0x6d530001},
+	{0xc78, 0x6c540001}, {0xc78, 0x6b550001},
+	{0xc78, 0x6a560001}, {0xc78, 0x69570001},
+	{0xc78, 0x68580001}, {0xc78, 0x67590001},
+	{0xc78, 0x665a0001}, {0xc78, 0x655b0001},
+	{0xc78, 0x645c0001}, {0xc78, 0x635d0001},
+	{0xc78, 0x625e0001}, {0xc78, 0x615f0001},
+	{0xc78, 0x60600001}, {0xc78, 0x49610001},
+	{0xc78, 0x48620001}, {0xc78, 0x47630001},
+	{0xc78, 0x46640001}, {0xc78, 0x45650001},
+	{0xc78, 0x44660001}, {0xc78, 0x43670001},
+	{0xc78, 0x42680001}, {0xc78, 0x41690001},
+	{0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+	{0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+	{0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+	{0xc78, 0x21700001}, {0xc78, 0x20710001},
+	{0xc78, 0x06720001}, {0xc78, 0x05730001},
+	{0xc78, 0x04740001}, {0xc78, 0x03750001},
+	{0xc78, 0x02760001}, {0xc78, 0x01770001},
+	{0xc78, 0x00780001}, {0xc78, 0x00790001},
+	{0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+	{0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+	{0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+	{0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+	{0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+	{0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+	{0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+	{0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+	{0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+	{0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+	{0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+	{0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+	{0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+	{0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+	{0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+	{0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+	{0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+	{0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+	{0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+	{0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
+	{0xc78, 0x7b000001}, {0xc78, 0x7b010001},
+	{0xc78, 0x7b020001}, {0xc78, 0x7b030001},
+	{0xc78, 0x7b040001}, {0xc78, 0x7b050001},
+	{0xc78, 0x7b060001}, {0xc78, 0x7b070001},
+	{0xc78, 0x7b080001}, {0xc78, 0x7a090001},
+	{0xc78, 0x790a0001}, {0xc78, 0x780b0001},
+	{0xc78, 0x770c0001}, {0xc78, 0x760d0001},
+	{0xc78, 0x750e0001}, {0xc78, 0x740f0001},
+	{0xc78, 0x73100001}, {0xc78, 0x72110001},
+	{0xc78, 0x71120001}, {0xc78, 0x70130001},
+	{0xc78, 0x6f140001}, {0xc78, 0x6e150001},
+	{0xc78, 0x6d160001}, {0xc78, 0x6c170001},
+	{0xc78, 0x6b180001}, {0xc78, 0x6a190001},
+	{0xc78, 0x691a0001}, {0xc78, 0x681b0001},
+	{0xc78, 0x671c0001}, {0xc78, 0x661d0001},
+	{0xc78, 0x651e0001}, {0xc78, 0x641f0001},
+	{0xc78, 0x63200001}, {0xc78, 0x62210001},
+	{0xc78, 0x61220001}, {0xc78, 0x60230001},
+	{0xc78, 0x46240001}, {0xc78, 0x45250001},
+	{0xc78, 0x44260001}, {0xc78, 0x43270001},
+	{0xc78, 0x42280001}, {0xc78, 0x41290001},
+	{0xc78, 0x402a0001}, {0xc78, 0x262b0001},
+	{0xc78, 0x252c0001}, {0xc78, 0x242d0001},
+	{0xc78, 0x232e0001}, {0xc78, 0x222f0001},
+	{0xc78, 0x21300001}, {0xc78, 0x20310001},
+	{0xc78, 0x06320001}, {0xc78, 0x05330001},
+	{0xc78, 0x04340001}, {0xc78, 0x03350001},
+	{0xc78, 0x02360001}, {0xc78, 0x01370001},
+	{0xc78, 0x00380001}, {0xc78, 0x00390001},
+	{0xc78, 0x003a0001}, {0xc78, 0x003b0001},
+	{0xc78, 0x003c0001}, {0xc78, 0x003d0001},
+	{0xc78, 0x003e0001}, {0xc78, 0x003f0001},
+	{0xc78, 0x7b400001}, {0xc78, 0x7b410001},
+	{0xc78, 0x7b420001}, {0xc78, 0x7b430001},
+	{0xc78, 0x7b440001}, {0xc78, 0x7b450001},
+	{0xc78, 0x7b460001}, {0xc78, 0x7b470001},
+	{0xc78, 0x7b480001}, {0xc78, 0x7a490001},
+	{0xc78, 0x794a0001}, {0xc78, 0x784b0001},
+	{0xc78, 0x774c0001}, {0xc78, 0x764d0001},
+	{0xc78, 0x754e0001}, {0xc78, 0x744f0001},
+	{0xc78, 0x73500001}, {0xc78, 0x72510001},
+	{0xc78, 0x71520001}, {0xc78, 0x70530001},
+	{0xc78, 0x6f540001}, {0xc78, 0x6e550001},
+	{0xc78, 0x6d560001}, {0xc78, 0x6c570001},
+	{0xc78, 0x6b580001}, {0xc78, 0x6a590001},
+	{0xc78, 0x695a0001}, {0xc78, 0x685b0001},
+	{0xc78, 0x675c0001}, {0xc78, 0x665d0001},
+	{0xc78, 0x655e0001}, {0xc78, 0x645f0001},
+	{0xc78, 0x63600001}, {0xc78, 0x62610001},
+	{0xc78, 0x61620001}, {0xc78, 0x60630001},
+	{0xc78, 0x46640001}, {0xc78, 0x45650001},
+	{0xc78, 0x44660001}, {0xc78, 0x43670001},
+	{0xc78, 0x42680001}, {0xc78, 0x41690001},
+	{0xc78, 0x406a0001}, {0xc78, 0x266b0001},
+	{0xc78, 0x256c0001}, {0xc78, 0x246d0001},
+	{0xc78, 0x236e0001}, {0xc78, 0x226f0001},
+	{0xc78, 0x21700001}, {0xc78, 0x20710001},
+	{0xc78, 0x06720001}, {0xc78, 0x05730001},
+	{0xc78, 0x04740001}, {0xc78, 0x03750001},
+	{0xc78, 0x02760001}, {0xc78, 0x01770001},
+	{0xc78, 0x00780001}, {0xc78, 0x00790001},
+	{0xc78, 0x007a0001}, {0xc78, 0x007b0001},
+	{0xc78, 0x007c0001}, {0xc78, 0x007d0001},
+	{0xc78, 0x007e0001}, {0xc78, 0x007f0001},
+	{0xc78, 0x3800001e}, {0xc78, 0x3801001e},
+	{0xc78, 0x3802001e}, {0xc78, 0x3803001e},
+	{0xc78, 0x3804001e}, {0xc78, 0x3805001e},
+	{0xc78, 0x3806001e}, {0xc78, 0x3807001e},
+	{0xc78, 0x3808001e}, {0xc78, 0x3c09001e},
+	{0xc78, 0x3e0a001e}, {0xc78, 0x400b001e},
+	{0xc78, 0x440c001e}, {0xc78, 0x480d001e},
+	{0xc78, 0x4c0e001e}, {0xc78, 0x500f001e},
+	{0xc78, 0x5210001e}, {0xc78, 0x5611001e},
+	{0xc78, 0x5a12001e}, {0xc78, 0x5e13001e},
+	{0xc78, 0x6014001e}, {0xc78, 0x6015001e},
+	{0xc78, 0x6016001e}, {0xc78, 0x6217001e},
+	{0xc78, 0x6218001e}, {0xc78, 0x6219001e},
+	{0xc78, 0x621a001e}, {0xc78, 0x621b001e},
+	{0xc78, 0x621c001e}, {0xc78, 0x621d001e},
+	{0xc78, 0x621e001e}, {0xc78, 0x621f001e},
+	{0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00039c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+	{0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00030355},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+	{0x1f, 0x00000000}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x00057730},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f474},
+	{0x15, 0x0004f477}, {0x15, 0x0008f455},
+	{0x15, 0x000cf455}, {0x16, 0x00000339},
+	{0x16, 0x00040339}, {0x16, 0x00080339},
+	{0x16, 0x000c0366}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00000003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00000247}, {0x1f, 0x00000000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00010255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f424},
+	{0x15, 0x0004f424}, {0x15, 0x0008f424},
+	{0x15, 0x000cf424}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287af}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x00014297},
+	{0x13, 0x00010295}, {0x13, 0x0000c298},
+	{0x13, 0x0000819c}, {0x13, 0x000040a8},
+	{0x13, 0x0000001c}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f424},
+	{0x15, 0x0004f424}, {0x15, 0x0008f424},
+	{0x15, 0x000cf424}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00010255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x00000000},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x00032000}, {0x12, 0x00071000},
+	{0x12, 0x000b0000}, {0x12, 0x000fc000},
+	{0x13, 0x000287b3}, {0x13, 0x000244b7},
+	{0x13, 0x000204ab}, {0x13, 0x0001c49f},
+	{0x13, 0x00018493}, {0x13, 0x0001429b},
+	{0x13, 0x00010299}, {0x13, 0x0000c29c},
+	{0x13, 0x000081a0}, {0x13, 0x000040ac},
+	{0x13, 0x00000020}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f405},
+	{0x15, 0x0004f405}, {0x15, 0x0008f405},
+	{0x15, 0x000cf405}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+	{0x00, 0x00030159}, {0x01, 0x00031284},
+	{0x02, 0x00098000}, {0x03, 0x00018c63},
+	{0x04, 0x000210e7}, {0x09, 0x0002044f},
+	{0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+	{0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+	{0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+	{0x19, 0x00000000}, {0x1a, 0x00000255},
+	{0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+	{0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+	{0x1f, 0x00080001}, {0x20, 0x0000b614},
+	{0x21, 0x0006c000}, {0x22, 0x0000083c},
+	{0x23, 0x00001558}, {0x24, 0x00000060},
+	{0x25, 0x00000483}, {0x26, 0x0004f000},
+	{0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+	{0x29, 0x00004783}, {0x2a, 0x00000001},
+	{0x2b, 0x00021334}, {0x2a, 0x00000000},
+	{0x2b, 0x00000054}, {0x2a, 0x00000001},
+	{0x2b, 0x00000808}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000c}, {0x2a, 0x00000002},
+	{0x2b, 0x00000808}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000003},
+	{0x2b, 0x00000808}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000004},
+	{0x2b, 0x00000808}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000005},
+	{0x2b, 0x00000808}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000006},
+	{0x2b, 0x00000709}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000007},
+	{0x2b, 0x00000709}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000008},
+	{0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x00000009},
+	{0x2b, 0x0000060a}, {0x2b, 0x00053333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+	{0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+	{0x2b, 0x0000060a}, {0x2b, 0x00063333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+	{0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+	{0x2b, 0x0000060a}, {0x2b, 0x00073333},
+	{0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+	{0x2b, 0x0000050b}, {0x2b, 0x00066666},
+	{0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+	{0x10, 0x0004000f}, {0x11, 0x000e31fc},
+	{0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+	{0x10, 0x0002000f}, {0x11, 0x000203f9},
+	{0x10, 0x0003000f}, {0x11, 0x000ff500},
+	{0x10, 0x00000000}, {0x11, 0x00000000},
+	{0x10, 0x0008000f}, {0x11, 0x0003f100},
+	{0x10, 0x0009000f}, {0x11, 0x00023100},
+	{0x12, 0x000d8000}, {0x12, 0x00090000},
+	{0x12, 0x00051000}, {0x12, 0x00012000},
+	{0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+	{0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+	{0x13, 0x000183a4}, {0x13, 0x00014398},
+	{0x13, 0x000101a4}, {0x13, 0x0000c198},
+	{0x13, 0x000080a4}, {0x13, 0x00004098},
+	{0x13, 0x00000000}, {0x14, 0x0001944c},
+	{0x14, 0x00059444}, {0x14, 0x0009944c},
+	{0x14, 0x000d9444}, {0x15, 0x0000f405},
+	{0x15, 0x0004f405}, {0x15, 0x0008f405},
+	{0x15, 0x000cf405}, {0x16, 0x000e0330},
+	{0x16, 0x000a0330}, {0x16, 0x00060330},
+	{0x16, 0x00020330}, {0x00, 0x00010159},
+	{0x18, 0x0000f401}, {0xfe, 0x00000000},
+	{0xfe, 0x00000000}, {0x1f, 0x00080003},
+	{0xfe, 0x00000000}, {0xfe, 0x00000000},
+	{0x1e, 0x00044457}, {0x1f, 0x00080000},
+	{0x00, 0x00030159},
+	{0xff, 0xffffffff}
+};
+
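+/*
+ * Per-RF-path (A/B) register offsets, indexed by enum rtl8xxxu_rfpath
+ * and used by the RF register read/write helpers below.
+ */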
+static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
+	{	/* RF_A */
+		.hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
+		.hssiparm2 = REG_FPGA0_XA_HSSI_PARM2,
+		.lssiparm = REG_FPGA0_XA_LSSI_PARM,
+		.hspiread = REG_HSPI_XA_READBACK,
+		.lssiread = REG_FPGA0_XA_LSSI_READBACK,
+		.rf_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL,
+	},
+	{	/* RF_B */
+		.hssiparm1 = REG_FPGA0_XB_HSSI_PARM1,
+		.hssiparm2 = REG_FPGA0_XB_HSSI_PARM2,
+		.lssiparm = REG_FPGA0_XB_LSSI_PARM,
+		.hspiread = REG_HSPI_XB_READBACK,
+		.lssiread = REG_FPGA0_XB_LSSI_READBACK,
+		.rf_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL,
+	},
+};
+
+static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+	REG_OFDM0_XA_RX_IQ_IMBALANCE,
+	REG_OFDM0_XB_RX_IQ_IMBALANCE,
+	REG_OFDM0_ENERGY_CCA_THRES,
+	REG_OFDM0_AGCR_SSI_TABLE,
+	REG_OFDM0_XA_TX_IQ_IMBALANCE,
+	REG_OFDM0_XB_TX_IQ_IMBALANCE,
+	REG_OFDM0_XC_TX_AFE,
+	REG_OFDM0_XD_TX_AFE,
+	REG_OFDM0_RX_IQ_EXT_ANTA
+};
+
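+/*
+ * Low level register accessors. All accesses go over USB control
+ * transfers; priv->usb_buf serves as the transfer buffer and is
+ * protected by usb_buf_mutex, and multi-byte values are byte-swapped
+ * to/from the device's little-endian order as needed.
+ */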
+static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u8 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val8, sizeof(u8),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = priv->usb_buf.val8;
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)   = 0x%02x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u16 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val16, sizeof(u16),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = le16_to_cpu(priv->usb_buf.val16);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)  = 0x%04x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+{
+	struct usb_device *udev = priv->udev;
+	int len;
+	u32 data;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	len = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_READ,
+			      addr, 0, &priv->usb_buf.val32, sizeof(u32),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	data = le32_to_cpu(priv->usb_buf.val32);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_READ)
+		dev_info(&udev->dev, "%s(%04x)  = 0x%08x, len %i\n",
+			 __func__, addr, data, len);
+	return data;
+}
+
+static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val8 = val;
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val8, sizeof(u8),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%02x\n",
+			 __func__, addr, val);
+	return ret;
+}
+
+static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val16 = cpu_to_le16(val);
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val16, sizeof(u16),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%04x\n",
+			 __func__, addr, val);
+	return ret;
+}
+
+static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+{
+	struct usb_device *udev = priv->udev;
+	int ret;
+
+	mutex_lock(&priv->usb_buf_mutex);
+	priv->usb_buf.val32 = cpu_to_le32(val);
+	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+			      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+			      addr, 0, &priv->usb_buf.val32, sizeof(u32),
+			      RTW_USB_CONTROL_MSG_TIMEOUT);
+	mutex_unlock(&priv->usb_buf_mutex);
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE)
+		dev_info(&udev->dev, "%s(%04x) = 0x%08x\n",
+			 __func__, addr, val);
+	return ret;
+}
+
+static int
+rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len)
+{
+	struct usb_device *udev = priv->udev;
+	int blocksize = priv->fops->writeN_block_size;
+	int ret, i, count, remainder;
+
+	count = len / blocksize;
+	remainder = len % blocksize;
+
+	for (i = 0; i < count; i++) {
+		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+				      addr, 0, buf, blocksize,
+				      RTW_USB_CONTROL_MSG_TIMEOUT);
+		if (ret != blocksize)
+			goto write_error;
+
+		addr += blocksize;
+		buf += blocksize;
+	}
+
+	if (remainder) {
+		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+				      REALTEK_USB_CMD_REQ, REALTEK_USB_WRITE,
+				      addr, 0, buf, remainder,
+				      RTW_USB_CONTROL_MSG_TIMEOUT);
+		if (ret != remainder)
+			goto write_error;
+	}
+
+	return len;
+
+write_error:
+	dev_info(&udev->dev,
+		 "%s: Failed to write block at addr: %04x size: %04x\n",
+		 __func__, addr, blocksize);
+	return -EAGAIN;
+}
+
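+/*
+ * RF registers are read indirectly: the register address is written to
+ * the path's HSSI parameter register while the EDGE_READ bit is toggled,
+ * and the 20-bit result is then fetched from either the HSPI or the
+ * LSSI readback register, depending on whether the path is in PI mode.
+ */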
+static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+			       enum rtl8xxxu_rfpath path, u8 reg)
+{
+	u32 hssia, val32, retval;
+
+	hssia = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+	if (path != RF_A)
+		val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm2);
+	else
+		val32 = hssia;
+
+	val32 &= ~FPGA0_HSSI_PARM2_ADDR_MASK;
+	val32 |= (reg << FPGA0_HSSI_PARM2_ADDR_SHIFT);
+	val32 |= FPGA0_HSSI_PARM2_EDGE_READ;
+	hssia &= ~FPGA0_HSSI_PARM2_EDGE_READ;
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+
+	udelay(10);
+
+	rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].hssiparm2, val32);
+	udelay(100);
+
+	hssia |= FPGA0_HSSI_PARM2_EDGE_READ;
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM2, hssia);
+	udelay(10);
+
+	val32 = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hssiparm1);
+	if (val32 & FPGA0_HSSI_PARM1_PI)
+		retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].hspiread);
+	else
+		retval = rtl8xxxu_read32(priv, rtl8xxxu_rfregs[path].lssiread);
+
+	retval &= 0xfffff;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_READ)
+		dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+			 __func__, reg, retval);
+	return retval;
+}
+
+static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+				enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+{
+	int ret, retval;
+	u32 dataaddr;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
+		dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
+			 __func__, reg, data);
+
+	data &= FPGA0_LSSI_PARM_DATA_MASK;
+	dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+
+	/* Use XB for path B */
+	ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
+	if (ret != sizeof(dataaddr))
+		retval = -EIO;
+	else
+		retval = 0;
+
+	udelay(1);
+
+	return retval;
+}
+
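+/*
+ * Hand an H2C command to the firmware. The mailbox registers are used
+ * in round-robin order; when a command carries an extension word it is
+ * written before the main command word.
+ */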
+static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c)
+{
+	struct device *dev = &priv->udev->dev;
+	int mbox_nr, retry, retval = 0;
+	int mbox_reg, mbox_ext_reg;
+	u8 val8;
+
+	mutex_lock(&priv->h2c_mutex);
+
+	mbox_nr = priv->next_mbox;
+	mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+	mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
+
+	/*
+	 * MBOX ready?
+	 */
+	retry = 100;
+	do {
+		val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+		if (!(val8 & BIT(mbox_nr)))
+			break;
+	} while (retry--);
+
+	/* retry is -1 here only if the mailbox never became free */
+	if (retry < 0) {
+		dev_dbg(dev, "%s: Mailbox busy\n", __func__);
+		retval = -EBUSY;
+		goto error;
+	}
+
+	/*
+	 * Need to swap as it's being swapped again by rtl8xxxu_write16/32()
+	 */
+	if (h2c->cmd.cmd & H2C_EXT) {
+		rtl8xxxu_write16(priv, mbox_ext_reg,
+				 le16_to_cpu(h2c->raw.ext));
+		if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+			dev_info(dev, "H2C_EXT %04x\n",
+				 le16_to_cpu(h2c->raw.ext));
+	}
+	rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+		dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+	priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+	mutex_unlock(&priv->h2c_mutex);
+	return retval;
+}
+
+static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+
+	val8 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+	val8 |= BIT(0) | BIT(3);
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, val8);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+	val32 &= ~(BIT(4) | BIT(5));
+	val32 |= BIT(3);
+	if (priv->rf_paths == 2) {
+		val32 &= ~(BIT(20) | BIT(21));
+		val32 |= BIT(19);
+	}
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	val32 &= ~OFDM_RF_PATH_TX_MASK;
+	if (priv->tx_paths == 2)
+		val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
+	else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+		val32 |= OFDM_RF_PATH_TX_B;
+	else
+		val32 |= OFDM_RF_PATH_TX_A;
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 &= ~FPGA_RF_MODE_JAPAN;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x63db25a0);
+	else
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x631b25a0);
+
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x32d95);
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x32d95);
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+{
+	u8 sps0;
+	u32 val32;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
+
+	/* RF RX code for preamble power saving */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_PARM);
+	val32 &= ~(BIT(3) | BIT(4) | BIT(5));
+	if (priv->rf_paths == 2)
+		val32 &= ~(BIT(19) | BIT(20) | BIT(21));
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_PARM, val32);
+
+	/* Disable TX for four paths */
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	val32 &= ~OFDM_RF_PATH_TX_MASK;
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+	/* Enable power saving */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 |= FPGA_RF_MODE_JAPAN;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	/* AFE control register to power down bits [30:22] */
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x00db25a0);
+	else
+		rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, 0x001b25a0);
+
+	/* Power down RF module */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+	if (priv->rf_paths == 2)
+		rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0);
+
+	sps0 &= ~(BIT(0) | BIT(3));
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
+}
+
+
+static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL + 2);
+	val8 &= ~BIT(6);
+	rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL + 2, val8);
+
+	rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 1, 0x64);
+	val8 = rtl8xxxu_read8(priv, REG_TBTT_PROHIBIT + 2);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_TBTT_PROHIBIT + 2, val8);
+}
+
+
+/*
+ * The rtl8723a has 3 channel groups for its efuse settings. It only
+ * supports the 2.4GHz band, so channels 1 - 14:
+ *  group 0: channels 1 - 3
+ *  group 1: channels 4 - 9
+ *  group 2: channels 10 - 14
+ *
+ * Note: We index from 0 in the code
+ */
+static int rtl8723a_channel_to_group(int channel)
+{
+	int group;
+
+	if (channel < 4)
+		group = 0;
+	else if (channel < 10)
+		group = 1;
+	else
+		group = 2;
+
+	return group;
+}
+
+static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u32 val32, rsr;
+	u8 val8, opmode;
+	bool ht = true;
+	int sec_ch_above, channel;
+	int i;
+
+	opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+	rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	channel = hw->conf.chandef.chan->hw_value;
+
+	switch (hw->conf.chandef.width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+		ht = false;
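+		/* fall through */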
+	case NL80211_CHAN_WIDTH_20:
+		opmode |= BW_OPMODE_20MHZ;
+		rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+		val32 &= ~FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+		val32 &= ~FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+		val32 |= FPGA0_ANALOG2_20MHZ;
+		rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+		break;
+	case NL80211_CHAN_WIDTH_40:
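+		/*
+		 * The RF is programmed with the centre of the 40MHz pair,
+		 * two channel numbers away from the control channel.
+		 */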
+		if (hw->conf.chandef.center_freq1 >
+		    hw->conf.chandef.chan->center_freq) {
+			sec_ch_above = 1;
+			channel += 2;
+		} else {
+			sec_ch_above = 0;
+			channel -= 2;
+		}
+
+		opmode &= ~BW_OPMODE_20MHZ;
+		rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+		rsr &= ~RSR_RSC_BANDWIDTH_40M;
+		if (sec_ch_above)
+			rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+		else
+			rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+		rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+		val32 |= FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+		val32 |= FPGA_RF_MODE;
+		rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+		/*
+		 * Set the control channel to upper or lower. These
+		 * settings are only required for 40MHz.
+		 */
+		val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+		val32 &= ~CCK0_SIDEBAND;
+		if (!sec_ch_above)
+			val32 |= CCK0_SIDEBAND;
+		rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+		val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+		if (sec_ch_above)
+			val32 |= OFDM_LSTF_PRIME_CH_LOW;
+		else
+			val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_ANALOG2);
+		val32 &= ~FPGA0_ANALOG2_20MHZ;
+		rtl8xxxu_write32(priv, REG_FPGA0_ANALOG2, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+		val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+		if (sec_ch_above)
+			val32 |= FPGA0_PS_UPPER_CHANNEL;
+		else
+			val32 |= FPGA0_PS_LOWER_CHANNEL;
+		rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+		break;
+
+	default:
+		break;
+	}
+
+	for (i = RF_A; i < priv->rf_paths; i++) {
+		val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+		val32 &= ~MODE_AG_CHANNEL_MASK;
+		val32 |= channel;
+		rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+	}
+
+	if (ht)
+		val8 = 0x0e;
+	else
+		val8 = 0x0a;
+
+	rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8);
+	rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8);
+
+	rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808);
+	rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a);
+
+	for (i = RF_A; i < priv->rf_paths; i++) {
+		val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+		if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+			val32 &= ~MODE_AG_CHANNEL_20MHZ;
+		else
+			val32 |= MODE_AG_CHANNEL_20MHZ;
+		rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+	}
+}
+
+static void
+rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+	u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
+	u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
+	u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
+	u8 val8;
+	int group, i;
+
+	group = rtl8723a_channel_to_group(channel);
+
+	cck[0] = priv->cck_tx_power_index_A[group];
+	cck[1] = priv->cck_tx_power_index_B[group];
+
+	ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
+	ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+
+	ofdmbase[0] = ofdm[0] +	priv->ofdm_tx_power_index_diff[group].a;
+	ofdmbase[1] = ofdm[1] +	priv->ofdm_tx_power_index_diff[group].b;
+
+	mcsbase[0] = ofdm[0];
+	mcsbase[1] = ofdm[1];
+	if (!ht40) {
+		mcsbase[0] += priv->ht20_tx_power_index_diff[group].a;
+		mcsbase[1] += priv->ht20_tx_power_index_diff[group].b;
+	}
+
+	if (priv->tx_paths > 1) {
+		if (ofdm[0] > priv->ht40_2s_tx_power_index_diff[group].a)
+			ofdm[0] -=  priv->ht40_2s_tx_power_index_diff[group].a;
+		if (ofdm[1] > priv->ht40_2s_tx_power_index_diff[group].b)
+			ofdm[1] -=  priv->ht40_2s_tx_power_index_diff[group].b;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+		dev_info(&priv->udev->dev,
+			 "%s: Setting TX power CCK A: %02x, "
+			 "CCK B: %02x, OFDM A: %02x, OFDM B: %02x\n",
+			 __func__, cck[0], cck[1], ofdm[0], ofdm[1]);
+
+	for (i = 0; i < RTL8723A_MAX_RF_PATHS; i++) {
+		if (cck[i] > RF6052_MAX_TX_PWR)
+			cck[i] = RF6052_MAX_TX_PWR;
+		if (ofdm[i] > RF6052_MAX_TX_PWR)
+			ofdm[i] = RF6052_MAX_TX_PWR;
+	}
+
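+	/*
+	 * The TX AGC registers pack one power index per byte, so the
+	 * per-path value is replicated across the byte lanes covering
+	 * the rates each register controls.
+	 */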
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+	val32 &= 0xffff00ff;
+	val32 |= (cck[0] << 8);
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+	val32 &= 0xff;
+	val32 |= ((cck[0] << 8) | (cck[0] << 16) | (cck[0] << 24));
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+	val32 &= 0xffffff00;
+	val32 |= cck[1];
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+	val32 &= 0xff;
+	val32 |= ((cck[1] << 8) | (cck[1] << 16) | (cck[1] << 24));
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+	ofdm_a = ofdmbase[0] | ofdmbase[0] << 8 |
+		ofdmbase[0] << 16 | ofdmbase[0] << 24;
+	ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
+		ofdmbase[1] << 16 | ofdmbase[1] << 24;
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+
+	mcs_a = mcsbase[0] | mcsbase[0] << 8 |
+		mcsbase[0] << 16 | mcsbase[0] << 24;
+	mcs_b = mcsbase[1] | mcsbase[1] << 8 |
+		mcsbase[1] << 16 | mcsbase[1] << 24;
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+
+	rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+	for (i = 0; i < 3; i++) {
+		if (i != 2)
+			val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
+		else
+			val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+		rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
+	}
+	rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+	for (i = 0; i < 3; i++) {
+		if (i != 2)
+			val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
+		else
+			val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+		rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
+	}
+}
+
+static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
+				  enum nl80211_iftype linktype)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_MSR);
+	val8 &= ~MSR_LINKTYPE_MASK;
+
+	switch (linktype) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+		val8 |= MSR_LINKTYPE_NONE;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		val8 |= MSR_LINKTYPE_ADHOC;
+		break;
+	case NL80211_IFTYPE_STATION:
+		val8 |= MSR_LINKTYPE_STATION;
+		break;
+	case NL80211_IFTYPE_AP:
+		val8 |= MSR_LINKTYPE_AP;
+		break;
+	default:
+		goto out;
+	}
+
+	rtl8xxxu_write8(priv, REG_MSR, val8);
+out:
+	return;
+}
+
+static void
+rtl8xxxu_set_retry(struct rtl8xxxu_priv *priv, u16 short_retry, u16 long_retry)
+{
+	u16 val16;
+
+	val16 = ((short_retry << RETRY_LIMIT_SHORT_SHIFT) &
+		 RETRY_LIMIT_SHORT_MASK) |
+		((long_retry << RETRY_LIMIT_LONG_SHIFT) &
+		 RETRY_LIMIT_LONG_MASK);
+
+	rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+}
+
+static void
+rtl8xxxu_set_spec_sifs(struct rtl8xxxu_priv *priv, u16 cck, u16 ofdm)
+{
+	u16 val16;
+
+	val16 = ((cck << SPEC_SIFS_CCK_SHIFT) & SPEC_SIFS_CCK_MASK) |
+		((ofdm << SPEC_SIFS_OFDM_SHIFT) & SPEC_SIFS_OFDM_MASK);
+
+	rtl8xxxu_write16(priv, REG_SPEC_SIFS, val16);
+}
+
+static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	char *cut;
+
+	switch (priv->chip_cut) {
+	case 0:
+		cut = "A";
+		break;
+	case 1:
+		cut = "B";
+		break;
+	default:
+		cut = "unknown";
+	}
+
+	dev_info(dev,
+		 "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+		 priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC",
+		 priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+		 priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+		 priv->hi_pa);
+
+	dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
+}
+
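+/*
+ * Determine the chip variant from SYS_CFG: a BT function indicates an
+ * 8723AU; otherwise the HPON bonding bits separate the 1T2R 8191CU
+ * from the 2T2R 8192CU, with 8188CU as the single path fallback.
+ */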
+static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	u32 val32, bonding;
+	u16 val16;
+
+	val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+	priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >>
+		SYS_CFG_CHIP_VERSION_SHIFT;
+	if (val32 & SYS_CFG_TRP_VAUX_EN) {
+		dev_info(dev, "Unsupported test chip\n");
+		return -ENOTSUPP;
+	}
+
+	if (val32 & SYS_CFG_BT_FUNC) {
+		sprintf(priv->chip_name, "8723AU");
+		priv->rf_paths = 1;
+		priv->rx_paths = 1;
+		priv->tx_paths = 1;
+		priv->rtlchip = 0x8723a;
+
+		val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL);
+		if (val32 & MULTI_WIFI_FUNC_EN)
+			priv->has_wifi = 1;
+		if (val32 & MULTI_BT_FUNC_EN)
+			priv->has_bluetooth = 1;
+		if (val32 & MULTI_GPS_FUNC_EN)
+			priv->has_gps = 1;
+	} else if (val32 & SYS_CFG_TYPE_ID) {
+		bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
+		bonding &= HPON_FSM_BONDING_MASK;
+		if (bonding == HPON_FSM_BONDING_1T2R) {
+			sprintf(priv->chip_name, "8191CU");
+			priv->rf_paths = 2;
+			priv->rx_paths = 2;
+			priv->tx_paths = 1;
+			priv->rtlchip = 0x8191c;
+		} else {
+			sprintf(priv->chip_name, "8192CU");
+			priv->rf_paths = 2;
+			priv->rx_paths = 2;
+			priv->tx_paths = 2;
+			priv->rtlchip = 0x8192c;
+		}
+		priv->has_wifi = 1;
+	} else {
+		sprintf(priv->chip_name, "8188CU");
+		priv->rf_paths = 1;
+		priv->rx_paths = 1;
+		priv->tx_paths = 1;
+		priv->rtlchip = 0x8188c;
+		priv->has_wifi = 1;
+	}
+
+	if (val32 & SYS_CFG_VENDOR_ID)
+		priv->vendor_umc = 1;
+
+	val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS);
+	priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28;
+
+	val16 = rtl8xxxu_read16(priv, REG_NORMAL_SIE_EP_TX);
+	if (val16 & NORMAL_SIE_EP_TX_HIGH_MASK) {
+		priv->ep_tx_high_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	if (val16 & NORMAL_SIE_EP_TX_NORMAL_MASK) {
+		priv->ep_tx_normal_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	if (val16 & NORMAL_SIE_EP_TX_LOW_MASK) {
+		priv->ep_tx_low_queue = 1;
+		priv->ep_tx_count++;
+	}
+
+	/*
+	 * Fallback for devices that do not provide REG_NORMAL_SIE_EP_TX
+	 */
+	if (!priv->ep_tx_count) {
+		switch (priv->nr_out_eps) {
+		case 3:
+			priv->ep_tx_low_queue = 1;
+			priv->ep_tx_count++;
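+			/* fall through */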
+		case 2:
+			priv->ep_tx_normal_queue = 1;
+			priv->ep_tx_count++;
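+			/* fall through */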
+		case 1:
+			priv->ep_tx_high_queue = 1;
+			priv->ep_tx_count++;
+			break;
+		default:
+			dev_info(dev, "Unsupported USB TX endpoints\n");
+			return -ENOTSUPP;
+		}
+	}
+
+	return 0;
+}
+
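+/*
+ * Extract the MAC address and the per channel group TX power data
+ * from the 8723A wifi efuse map.
+ */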
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+	if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129))
+		return -EINVAL;
+
+	ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr);
+
+	memcpy(priv->cck_tx_power_index_A,
+	       priv->efuse_wifi.efuse8723.cck_tx_power_index_A,
+	       sizeof(priv->cck_tx_power_index_A));
+	memcpy(priv->cck_tx_power_index_B,
+	       priv->efuse_wifi.efuse8723.cck_tx_power_index_B,
+	       sizeof(priv->cck_tx_power_index_B));
+
+	memcpy(priv->ht40_1s_tx_power_index_A,
+	       priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A,
+	       sizeof(priv->ht40_1s_tx_power_index_A));
+	memcpy(priv->ht40_1s_tx_power_index_B,
+	       priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B,
+	       sizeof(priv->ht40_1s_tx_power_index_B));
+
+	memcpy(priv->ht20_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff,
+	       sizeof(priv->ht20_tx_power_index_diff));
+	memcpy(priv->ofdm_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff,
+	       sizeof(priv->ofdm_tx_power_index_diff));
+
+	memcpy(priv->ht40_max_power_offset,
+	       priv->efuse_wifi.efuse8723.ht40_max_power_offset,
+	       sizeof(priv->ht40_max_power_offset));
+	memcpy(priv->ht20_max_power_offset,
+	       priv->efuse_wifi.efuse8723.ht20_max_power_offset,
+	       sizeof(priv->ht20_max_power_offset));
+
+	dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+		 priv->efuse_wifi.efuse8723.vendor_name);
+	dev_info(&priv->udev->dev, "Product: %.41s\n",
+		 priv->efuse_wifi.efuse8723.device_name);
+	return 0;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+	int i;
+
+	if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129))
+		return -EINVAL;
+
+	ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr);
+
+	memcpy(priv->cck_tx_power_index_A,
+	       priv->efuse_wifi.efuse8192.cck_tx_power_index_A,
+	       sizeof(priv->cck_tx_power_index_A));
+	memcpy(priv->cck_tx_power_index_B,
+	       priv->efuse_wifi.efuse8192.cck_tx_power_index_B,
+	       sizeof(priv->cck_tx_power_index_B));
+
+	memcpy(priv->ht40_1s_tx_power_index_A,
+	       priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A,
+	       sizeof(priv->ht40_1s_tx_power_index_A));
+	memcpy(priv->ht40_1s_tx_power_index_B,
+	       priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B,
+	       sizeof(priv->ht40_1s_tx_power_index_B));
+	memcpy(priv->ht40_2s_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff,
+	       sizeof(priv->ht40_2s_tx_power_index_diff));
+
+	memcpy(priv->ht20_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff,
+	       sizeof(priv->ht20_tx_power_index_diff));
+	memcpy(priv->ofdm_tx_power_index_diff,
+	       priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff,
+	       sizeof(priv->ofdm_tx_power_index_diff));
+
+	memcpy(priv->ht40_max_power_offset,
+	       priv->efuse_wifi.efuse8192.ht40_max_power_offset,
+	       sizeof(priv->ht40_max_power_offset));
+	memcpy(priv->ht20_max_power_offset,
+	       priv->efuse_wifi.efuse8192.ht20_max_power_offset,
+	       sizeof(priv->ht20_max_power_offset));
+
+	dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+		 priv->efuse_wifi.efuse8192.vendor_name);
+	dev_info(&priv->udev->dev, "Product: %.20s\n",
+		 priv->efuse_wifi.efuse8192.device_name);
+
+	if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) {
+		sprintf(priv->chip_name, "8188RU");
+		priv->hi_pa = 1;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+		unsigned char *raw = priv->efuse_wifi.raw;
+
+		dev_info(&priv->udev->dev,
+			 "%s: dumping efuse (0x%02zx bytes):\n",
+			 __func__, sizeof(struct rtl8192cu_efuse));
+		for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+			dev_info(&priv->udev->dev, "%02x: "
+				 "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+				 raw[i], raw[i + 1], raw[i + 2],
+				 raw[i + 3], raw[i + 4], raw[i + 5],
+				 raw[i + 6], raw[i + 7]);
+		}
+	}
+	return 0;
+}
+
+#endif
+
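+/*
+ * Read one byte from the wifi efuse: latch the 10-bit address into
+ * EFUSE_CTRL, clear the top bit of the control byte to start the read,
+ * then poll bit 31 of EFUSE_CTRL for completion.
+ */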
+static int
+rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
+{
+	int i;
+	u8 val8;
+	u32 val32;
+
+	/* Write Address */
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 1, offset & 0xff);
+	val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 2);
+	val8 &= 0xfc;
+	val8 |= (offset >> 8) & 0x03;
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 2, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_EFUSE_CTRL + 3);
+	rtl8xxxu_write8(priv, REG_EFUSE_CTRL + 3, val8 & 0x7f);
+
+	/* Poll for data read */
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+	for (i = 0; i < RTL8XXXU_MAX_REG_POLL; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+		if (val32 & BIT(31))
+			break;
+	}
+
+	if (i == RTL8XXXU_MAX_REG_POLL)
+		return -EIO;
+
+	udelay(50);
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+
+	*data = val32 & 0xff;
+	return 0;
+}
+
+static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int i, ret = 0;
+	u8 val8, word_mask, header, extheader;
+	u16 val16, efuse_addr, offset;
+	u32 val32;
+
+	val16 = rtl8xxxu_read16(priv, REG_9346CR);
+	if (val16 & EEPROM_ENABLE)
+		priv->has_eeprom = 1;
+	if (val16 & EEPROM_BOOT)
+		priv->boot_eeprom = 1;
+
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST);
+	val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT;
+	rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32);
+
+	dev_dbg(dev, "Booting from %s\n",
+		priv->boot_eeprom ? "EEPROM" : "EFUSE");
+
+	rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_ENABLE);
+
+	/*  1.2V Power: From VDDON with Power Cut(0x0000[15]), default valid */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+	if (!(val16 & SYS_ISO_PWC_EV12V)) {
+		val16 |= SYS_ISO_PWC_EV12V;
+		rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+	}
+	/*  Reset: 0x0000[28], default valid */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	if (!(val16 & SYS_FUNC_ELDR)) {
+		val16 |= SYS_FUNC_ELDR;
+		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	}
+
+	/*
+	 * Clock: Gated(0x0008[5]) 8M(0x0008[1]) clock from ANA, default valid
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
+	if (!(val16 & SYS_CLK_LOADER_ENABLE) || !(val16 & SYS_CLK_ANA8M)) {
+		val16 |= (SYS_CLK_LOADER_ENABLE | SYS_CLK_ANA8M);
+		rtl8xxxu_write16(priv, REG_SYS_CLKR, val16);
+	}
+
+	/* Default value is 0xff */
+	memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A);
+
+	efuse_addr = 0;
+	while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) {
+		ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header);
+		if (ret || header == 0xff)
+			goto exit;
+
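+		/*
+		 * PG header layout: a plain header carries the section
+		 * offset in the high nibble and the word disable mask
+		 * in the low nibble; 0x0f in the low five bits marks an
+		 * extended header carrying additional offset bits.
+		 */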
+		if ((header & 0x1f) == 0x0f) {	/* extended header */
+			offset = (header & 0xe0) >> 5;
+
+			ret = rtl8xxxu_read_efuse8(priv, efuse_addr++,
+						   &extheader);
+			if (ret)
+				goto exit;
+			/* All words disabled */
+			if ((extheader & 0x0f) == 0x0f)
+				continue;
+
+			offset |= ((extheader & 0xf0) >> 1);
+			word_mask = extheader & 0x0f;
+		} else {
+			offset = (header >> 4) & 0x0f;
+			word_mask = header & 0x0f;
+		}
+
+		if (offset < EFUSE_MAX_SECTION_8723A) {
+			u16 map_addr;
+			/* Get word enable value from PG header */
+
+			/* We have 8 bits to indicate validity */
+			map_addr = offset * 8;
+			if (map_addr >= EFUSE_MAP_LEN_8723A) {
+				dev_warn(dev, "%s: Illegal map_addr (%04x), "
+					 "efuse corrupt!\n",
+					 __func__, map_addr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+				/* Check word enable condition in the section */
+				if (!(word_mask & BIT(i))) {
+					ret = rtl8xxxu_read_efuse8(priv,
+								   efuse_addr++,
+								   &val8);
+					if (ret)
+						goto exit;
+					priv->efuse_wifi.raw[map_addr++] = val8;
+
+					ret = rtl8xxxu_read_efuse8(priv,
+								   efuse_addr++,
+								   &val8);
+					if (ret)
+						goto exit;
+					priv->efuse_wifi.raw[map_addr++] = val8;
+				} else
+					map_addr += 2;
+			}
+		} else {
+			dev_warn(dev,
+				 "%s: Illegal offset (%04x), efuse corrupt!\n",
+				 __func__, offset);
+			ret = -EINVAL;
+			goto exit;
+		}
+	}
+
+exit:
+	rtl8xxxu_write8(priv, REG_EFUSE_ACCESS, EFUSE_ACCESS_DISABLE);
+
+	return ret;
+}
+
+static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int ret = 0, i;
+	u32 val32;
+
+	/* Poll checksum report */
+	for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+		if (val32 & MCU_FW_DL_CSUM_REPORT)
+			break;
+	}
+
+	if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+		dev_warn(dev, "Firmware checksum poll timed out\n");
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+	val32 |= MCU_FW_DL_READY;
+	val32 &= ~MCU_WINT_INIT_READY;
+	rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32);
+
+	/* Wait for firmware to become ready */
+	for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+		val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+		if (val32 & MCU_WINT_INIT_READY)
+			break;
+
+		udelay(100);
+	}
+
+	if (i == RTL8XXXU_FIRMWARE_POLL_MAX) {
+		dev_warn(dev, "Firmware failed to start\n");
+		ret = -EAGAIN;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv)
+{
+	int pages, remainder, i, ret;
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	u8 *fwptr;
+
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1);
+	val8 |= 4;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, val8);
+
+	/* 8051 enable */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE);
+
+	/* MCU firmware download enable */
+	val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE);
+
+	/* 8051 reset */
+	val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19));
+
+	/* Reset firmware download checksum */
+	val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT);
+
+	pages = priv->fw_size / RTL_FW_PAGE_SIZE;
+	remainder = priv->fw_size % RTL_FW_PAGE_SIZE;
+
+	fwptr = priv->fw_data->data;
+
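+	/*
+	 * Write the image one RTL_FW_PAGE_SIZE page at a time, selecting
+	 * the destination page via the low bits of REG_MCU_FW_DL + 2.
+	 */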
+	for (i = 0; i < pages; i++) {
+		val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+		rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+
+		ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+				      fwptr, RTL_FW_PAGE_SIZE);
+		if (ret != RTL_FW_PAGE_SIZE) {
+			ret = -EAGAIN;
+			goto fw_abort;
+		}
+
+		fwptr += RTL_FW_PAGE_SIZE;
+	}
+
+	if (remainder) {
+		val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8;
+		rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i);
+		ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS,
+				      fwptr, remainder);
+		if (ret != remainder) {
+			ret = -EAGAIN;
+			goto fw_abort;
+		}
+	}
+
+	ret = 0;
+fw_abort:
+	/* MCU firmware download disable */
+	val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL);
+	rtl8xxxu_write16(priv, REG_MCU_FW_DL,
+			 val16 & (~MCU_FW_DL_ENABLE & 0xff));
+
+	return ret;
+}
+
+static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+{
+	struct device *dev = &priv->udev->dev;
+	const struct firmware *fw;
+	int ret = 0;
+	u16 signature;
+
+	dev_info(dev, "%s: Loading firmware %s\n", DRIVER_NAME, fw_name);
+	if (request_firmware(&fw, fw_name, &priv->udev->dev)) {
+		dev_warn(dev, "request_firmware(%s) failed\n", fw_name);
+		ret = -EAGAIN;
+		goto exit;
+	}
+	if (!fw) {
+		dev_warn(dev, "Firmware data not available\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL);
+	if (!priv->fw_data) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header);
+
+	signature = le16_to_cpu(priv->fw_data->signature);
+	switch (signature & 0xfff0) {
+	case 0x92c0:
+	case 0x88c0:
+	case 0x2300:
+		break;
+	default:
+		ret = -EINVAL;
+		dev_warn(dev, "%s: Invalid firmware signature: 0x%04x\n",
+			 __func__, signature);
+	}
+
+	dev_info(dev, "Firmware revision %i.%i (signature 0x%04x)\n",
+		 le16_to_cpu(priv->fw_data->major_version),
+		 priv->fw_data->minor_version, signature);
+
+exit:
+	release_firmware(fw);
+	return ret;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+	char *fw_name;
+	int ret;
+
+	switch (priv->chip_cut) {
+	case 0:
+		fw_name = "rtlwifi/rtl8723aufw_A.bin";
+		break;
+	case 1:
+		if (priv->enable_bluetooth)
+			fw_name = "rtlwifi/rtl8723aufw_B.bin";
+		else
+			fw_name = "rtlwifi/rtl8723aufw_B_NoBT.bin";
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = rtl8xxxu_load_firmware(priv, fw_name);
+	return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+	char *fw_name;
+	int ret;
+
+	if (!priv->vendor_umc)
+		fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+	else if (priv->chip_cut || priv->rtlchip == 0x8192c)
+		fw_name = "rtlwifi/rtl8192cufw_B.bin";
+	else
+		fw_name = "rtlwifi/rtl8192cufw_A.bin";
+
+	ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+	return ret;
+}
+
+#endif
+
+static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+{
+	u16 val16;
+	int i = 100;
+
+	/* Inform 8051 to perform reset */
+	rtl8xxxu_write8(priv, REG_HMTFR + 3, 0x20);
+
+	for (i = 100; i > 0; i--) {
+		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+
+		if (!(val16 & SYS_FUNC_CPU_ENABLE)) {
+			dev_dbg(&priv->udev->dev,
+				"%s: Firmware self reset success!\n", __func__);
+			break;
+		}
+		udelay(50);
+	}
+
+	if (!i) {
+		/* Force firmware reset */
+		val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+		val16 &= ~SYS_FUNC_CPU_ENABLE;
+		rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+	}
+}
+
+static int
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+{
+	int i, ret;
+	u16 reg;
+	u8 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xffff && val == 0xff)
+			break;
+
+		ret = rtl8xxxu_write8(priv, reg, val);
+		if (ret != 1) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize MAC\n");
+			return -EAGAIN;
+		}
+	}
+
+	rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+
+	return 0;
+}
+
+static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_reg32val *array)
+{
+	int i, ret;
+	u16 reg;
+	u32 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xffff && val == 0xffffffff)
+			break;
+
+		ret = rtl8xxxu_write32(priv, reg, val);
+		if (ret != sizeof(val)) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize PHY\n");
+			return -EAGAIN;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+	u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
+	u32 val32;
+
+	/*
+	 * Todo: The vendor driver maintains a table of PHY register
+	 *       addresses, which is initialized here. Do we need this?
+	 */
+
+	val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+	udelay(2);
+	val8 |= AFE_PLL_320_ENABLE;
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+	udelay(2);
+
+	rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+	udelay(2);
+
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	/* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
+	val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+	val32 &= ~AFE_XTAL_RF_GATE;
+	if (priv->has_bluetooth)
+		val32 &= ~AFE_XTAL_BT_GATE;
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+	/* 6. 0x1f[7:0] = 0x07 */
+	val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+	rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
+	else if (priv->tx_paths == 2)
+		rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
+
+	if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+	    priv->vendor_umc && priv->chip_cut == 1)
+		rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+
+	if (priv->tx_paths == 1 && priv->rx_paths == 2) {
+		/*
+		 * For 1T2R boards, patch the registers.
+		 *
+		 * It looks like 8191/2 1T2R boards use path B for TX
+		 */
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_TX_INFO);
+		val32 &= ~(BIT(0) | BIT(1));
+		val32 |= BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_FPGA1_TX_INFO);
+		val32 &= ~0x300033;
+		val32 |= 0x200022;
+		rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+		val32 &= 0xff000000;
+		val32 |= 0x45000000;
+		rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+		val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+		val32 |= (OFDM_RF_PATH_RX_A | OFDM_RF_PATH_RX_B |
+			  OFDM_RF_PATH_TX_B);
+		rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGC_PARM1);
+		val32 &= ~(BIT(4) | BIT(5));
+		val32 |= BIT(4);
+		rtl8xxxu_write32(priv, REG_OFDM0_AGC_PARM1, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_CCK_RFON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_CCK_RFON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_CCK_BBON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_CCK_BBON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_RFON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_OFDM_RFON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_OFDM_BBON);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_OFDM_BBON, val32);
+
+		val32 = rtl8xxxu_read32(priv, REG_TX_TO_TX);
+		val32 &= ~(BIT(27) | BIT(26));
+		val32 |= BIT(27);
+		rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
+	}
+
+	if (priv->hi_pa)
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+	else
+		rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+	if (priv->rtlchip == 0x8723a &&
+	    priv->efuse_wifi.efuse8723.version >= 0x01) {
+		val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
+
+		val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+		val32 &= 0xff000fff;
+		val32 |= ((val8 | (val8 << 6)) << 12);
+
+		rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
+	}
+
+	ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+	ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+	ldohci12 = 0x57;
+	lpldo = 1;
+	val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+
+	rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+
+	return 0;
+}
+
+static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
+				 struct rtl8xxxu_rfregval *array,
+				 enum rtl8xxxu_rfpath path)
+{
+	int i, ret;
+	u8 reg;
+	u32 val;
+
+	for (i = 0; ; i++) {
+		reg = array[i].reg;
+		val = array[i].val;
+
+		if (reg == 0xff && val == 0xffffffff)
+			break;
+
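+		/*
+		 * Addresses 0xf9 - 0xfe are not real RF registers, but
+		 * delay markers used by the init tables.
+		 */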
+		switch (reg) {
+		case 0xfe:
+			msleep(50);
+			continue;
+		case 0xfd:
+			mdelay(5);
+			continue;
+		case 0xfc:
+			mdelay(1);
+			continue;
+		case 0xfb:
+			udelay(50);
+			continue;
+		case 0xfa:
+			udelay(5);
+			continue;
+		case 0xf9:
+			udelay(1);
+			continue;
+		}
+
+		reg &= 0x3f;
+
+		ret = rtl8xxxu_write_rfreg(priv, path, reg, val);
+		if (ret) {
+			dev_warn(&priv->udev->dev,
+				 "Failed to initialize RF\n");
+			return -EAGAIN;
+		}
+		udelay(1);
+	}
+
+	return 0;
+}
+
+static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+				struct rtl8xxxu_rfregval *table,
+				enum rtl8xxxu_rfpath path)
+{
+	u32 val32;
+	u16 val16, rfsi_rfenv;
+	u16 reg_sw_ctrl, reg_int_oe, reg_hssi_parm2;
+
+	switch (path) {
+	case RF_A:
+		reg_sw_ctrl = REG_FPGA0_XA_RF_SW_CTRL;
+		reg_int_oe = REG_FPGA0_XA_RF_INT_OE;
+		reg_hssi_parm2 = REG_FPGA0_XA_HSSI_PARM2;
+		break;
+	case RF_B:
+		reg_sw_ctrl = REG_FPGA0_XB_RF_SW_CTRL;
+		reg_int_oe = REG_FPGA0_XB_RF_INT_OE;
+		reg_hssi_parm2 = REG_FPGA0_XB_HSSI_PARM2;
+		break;
+	default:
+		dev_err(&priv->udev->dev, "%s: Unsupported RF path %c\n",
+			__func__, path + 'A');
+		return -EINVAL;
+	}
+	/* For path B, use XB */
+	rfsi_rfenv = rtl8xxxu_read16(priv, reg_sw_ctrl);
+	rfsi_rfenv &= FPGA0_RF_RFENV;
+
+	/*
+	 * These two we might be able to optimize into one
+	 */
+	val32 = rtl8xxxu_read32(priv, reg_int_oe);
+	val32 |= BIT(20);	/* 0x10 << 16 */
+	rtl8xxxu_write32(priv, reg_int_oe, val32);
+	udelay(1);
+
+	val32 = rtl8xxxu_read32(priv, reg_int_oe);
+	val32 |= BIT(4);
+	rtl8xxxu_write32(priv, reg_int_oe, val32);
+	udelay(1);
+
+	/*
+	 * These two we might be able to optimize into one
+	 */
+	val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+	val32 &= ~FPGA0_HSSI_3WIRE_ADDR_LEN;
+	rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+	udelay(1);
+
+	val32 = rtl8xxxu_read32(priv, reg_hssi_parm2);
+	val32 &= ~FPGA0_HSSI_3WIRE_DATA_LEN;
+	rtl8xxxu_write32(priv, reg_hssi_parm2, val32);
+	udelay(1);
+
+	rtl8xxxu_init_rf_regs(priv, table, path);
+
+	/* For path B, use XB */
+	val16 = rtl8xxxu_read16(priv, reg_sw_ctrl);
+	val16 &= ~FPGA0_RF_RFENV;
+	val16 |= rfsi_rfenv;
+	rtl8xxxu_write16(priv, reg_sw_ctrl, val16);
+
+	return 0;
+}
+
+static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
+{
+	int ret = -EBUSY;
+	int count = 0;
+	u32 value;
+
+	value = LLT_OP_WRITE | address << 8 | data;
+
+	rtl8xxxu_write32(priv, REG_LLT_INIT, value);
+
+	do {
+		value = rtl8xxxu_read32(priv, REG_LLT_INIT);
+		if ((value & LLT_OP_MASK) == LLT_OP_INACTIVE) {
+			ret = 0;
+			break;
+		}
+	} while (count++ < 20);
+
+	return ret;
+}
+
+static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+{
+	int ret;
+	int i;
+
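+	/* Chain the TX buffer pages - each LLT entry points to the next */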
+	for (i = 0; i < last_tx_page; i++) {
+		ret = rtl8xxxu_llt_write(priv, i, i + 1);
+		if (ret)
+			goto exit;
+	}
+
+	ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff);
+	if (ret)
+		goto exit;
+
+	/* Mark remaining pages as a ring buffer */
+	for (i = last_tx_page + 1; i < 0xff; i++) {
+		ret = rtl8xxxu_llt_write(priv, i, (i + 1));
+		if (ret)
+			goto exit;
+	}
+
+	/*  Let last entry point to the start entry of ring buffer */
+	ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+	if (ret)
+		goto exit;
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
+{
+	u16 val16, hi, lo;
+	u16 hiq, mgq, bkq, beq, viq, voq;
+	int hip, mgp, bkp, bep, vip, vop;
+	int ret = 0;
+
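+	/*
+	 * Map the TX queues onto the available bulk out endpoints, and
+	 * record for each queue which endpoint index to use
+	 * (hip/mgp/bkp/bep/vip/vop).
+	 */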
+	switch (priv->ep_tx_count) {
+	case 1:
+		if (priv->ep_tx_high_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+		} else if (priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_normal_queue) {
+			hi = TRXDMA_QUEUE_NORMAL;
+		} else {
+			hi = 0;
+			ret = -EINVAL;
+		}
+
+		hiq = hi;
+		mgq = hi;
+		bkq = hi;
+		beq = hi;
+		viq = hi;
+		voq = hi;
+
+		hip = 0;
+		mgp = 0;
+		bkp = 0;
+		bep = 0;
+		vip = 0;
+		vop = 0;
+		break;
+	case 2:
+		if (priv->ep_tx_high_queue && priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+			lo = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_normal_queue && priv->ep_tx_low_queue) {
+			hi = TRXDMA_QUEUE_NORMAL;
+			lo = TRXDMA_QUEUE_LOW;
+		} else if (priv->ep_tx_high_queue && priv->ep_tx_normal_queue) {
+			hi = TRXDMA_QUEUE_HIGH;
+			lo = TRXDMA_QUEUE_NORMAL;
+		} else {
+			ret = -EINVAL;
+			hi = 0;
+			lo = 0;
+		}
+
+		hiq = hi;
+		mgq = hi;
+		bkq = lo;
+		beq = lo;
+		viq = hi;
+		voq = hi;
+
+		hip = 0;
+		mgp = 0;
+		bkp = 1;
+		bep = 1;
+		vip = 0;
+		vop = 0;
+		break;
+	case 3:
+		beq = TRXDMA_QUEUE_LOW;
+		bkq = TRXDMA_QUEUE_LOW;
+		viq = TRXDMA_QUEUE_NORMAL;
+		voq = TRXDMA_QUEUE_HIGH;
+		mgq = TRXDMA_QUEUE_HIGH;
+		hiq = TRXDMA_QUEUE_HIGH;
+
+		hip = hiq ^ 3;
+		mgp = mgq ^ 3;
+		bkp = bkq ^ 3;
+		bep = beq ^ 3;
+		vip = viq ^ 3;
+		vop = voq ^ 3;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/*
+	 * None of the vendor drivers are configuring the beacon
+	 * queue here .... why?
+	 */
+	if (!ret) {
+		val16 = rtl8xxxu_read16(priv, REG_TRXDMA_CTRL);
+		val16 &= 0x7;
+		val16 |= (voq << TRXDMA_CTRL_VOQ_SHIFT) |
+			(viq << TRXDMA_CTRL_VIQ_SHIFT) |
+			(beq << TRXDMA_CTRL_BEQ_SHIFT) |
+			(bkq << TRXDMA_CTRL_BKQ_SHIFT) |
+			(mgq << TRXDMA_CTRL_MGQ_SHIFT) |
+			(hiq << TRXDMA_CTRL_HIQ_SHIFT);
+		rtl8xxxu_write16(priv, REG_TRXDMA_CTRL, val16);
+
+		priv->pipe_out[TXDESC_QUEUE_VO] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[vop]);
+		priv->pipe_out[TXDESC_QUEUE_VI] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[vip]);
+		priv->pipe_out[TXDESC_QUEUE_BE] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[bep]);
+		priv->pipe_out[TXDESC_QUEUE_BK] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[bkp]);
+		priv->pipe_out[TXDESC_QUEUE_BEACON] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+		priv->pipe_out[TXDESC_QUEUE_MGNT] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[mgp]);
+		priv->pipe_out[TXDESC_QUEUE_HIGH] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[hip]);
+		priv->pipe_out[TXDESC_QUEUE_CMD] =
+			usb_sndbulkpipe(priv->udev, priv->out_ep[0]);
+	}
+
+	return ret;
+}
+
+static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
+				       bool iqk_ok, int result[][8],
+				       int candidate, bool tx_only)
+{
+	u32 oldval, x, tx0_a, reg;
+	int y, tx0_c;
+	u32 val32;
+
+	if (!iqk_ok)
+		return;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	oldval = val32 >> 22;
+
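+	/* The IQK results are 10 bit signed values - sign extend */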
+	x = result[candidate][0];
+	if ((x & 0x00000200) != 0)
+		x = x | 0xfffffc00;
+	tx0_a = (x * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= tx0_a;
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(31);
+	if ((x * oldval >> 7) & 0x1)
+		val32 |= BIT(31);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	y = result[candidate][1];
+	if ((y & 0x00000200) != 0)
+		y = y | 0xfffffc00;
+	tx0_c = (y * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XC_TX_AFE);
+	val32 &= ~0xf0000000;
+	val32 |= (((tx0_c & 0x3c0) >> 6) << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_XC_TX_AFE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE);
+	val32 &= ~0x003f0000;
+	val32 |= ((tx0_c & 0x3f) << 16);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(29);
+	if ((y * oldval >> 7) & 0x1)
+		val32 |= BIT(29);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	if (tx_only) {
+		dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+		return;
+	}
+
+	reg = result[candidate][2];
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= (reg & 0x3ff);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+	reg = result[candidate][3] & 0x3F;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE);
+	val32 &= ~0xfc00;
+	val32 |= ((reg << 10) & 0xfc00);
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_RX_IQ_IMBALANCE, val32);
+
+	reg = (result[candidate][3] >> 6) & 0xF;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_RX_IQ_EXT_ANTA);
+	val32 &= ~0xf0000000;
+	val32 |= (reg << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
+}
+
+static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
+				       bool iqk_ok, int result[][8],
+				       int candidate, bool tx_only)
+{
+	u32 oldval, x, tx1_a, reg;
+	int y, tx1_c;
+	u32 val32;
+
+	if (!iqk_ok)
+		return;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	oldval = val32 >> 22;
+
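+	/* The IQK results are 10 bit signed values - sign extend */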
+	x = result[candidate][4];
+	if ((x & 0x00000200) != 0)
+		x = x | 0xfffffc00;
+	tx1_a = (x * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= tx1_a;
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(27);
+	if ((x * oldval >> 7) & 0x1)
+		val32 |= BIT(27);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	y = result[candidate][5];
+	if ((y & 0x00000200) != 0)
+		y = y | 0xfffffc00;
+	tx1_c = (y * oldval) >> 8;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XD_TX_AFE);
+	val32 &= ~0xf0000000;
+	val32 |= (((tx1_c & 0x3c0) >> 6) << 28);
+	rtl8xxxu_write32(priv, REG_OFDM0_XD_TX_AFE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE);
+	val32 &= ~0x003f0000;
+	val32 |= ((tx1_c & 0x3f) << 16);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_TX_IQ_IMBALANCE, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_ENERGY_CCA_THRES);
+	val32 &= ~BIT(25);
+	if ((y * oldval >> 7) & 0x1)
+		val32 |= BIT(25);
+	rtl8xxxu_write32(priv, REG_OFDM0_ENERGY_CCA_THRES, val32);
+
+	if (tx_only) {
+		dev_dbg(&priv->udev->dev, "%s: only TX\n", __func__);
+		return;
+	}
+
+	reg = result[candidate][6];
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+	val32 &= ~0x3ff;
+	val32 |= (reg & 0x3ff);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+	reg = result[candidate][7] & 0x3f;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE);
+	val32 &= ~0xfc00;
+	val32 |= ((reg << 10) & 0xfc00);
+	rtl8xxxu_write32(priv, REG_OFDM0_XB_RX_IQ_IMBALANCE, val32);
+
+	reg = (result[candidate][7] >> 6) & 0xf;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_AGCR_SSI_TABLE);
+	val32 &= ~0x0000f000;
+	val32 |= (reg << 12);
+	rtl8xxxu_write32(priv, REG_OFDM0_AGCR_SSI_TABLE, val32);
+}
+
+#define MAX_TOLERANCE		5
+
+static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+					int result[][8], int c1, int c2)
+{
+	u32 i, j, diff, simubitmap, bound = 0;
+	int candidate[2] = {-1, -1};	/* for path A and path B */
+	bool retval = true;
+
+	if (priv->tx_paths > 1)
+		bound = 8;
+	else
+		bound = 4;
+
+	simubitmap = 0;
+
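+	/*
+	 * Flag entries where the two runs differ by more than
+	 * MAX_TOLERANCE, except where one run's RX result pair sums to
+	 * zero, in which case the other run is chosen as candidate.
+	 */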
+	for (i = 0; i < bound; i++) {
+		diff = (result[c1][i] > result[c2][i]) ?
+			(result[c1][i] - result[c2][i]) :
+			(result[c2][i] - result[c1][i]);
+		if (diff > MAX_TOLERANCE) {
+			if ((i == 2 || i == 6) && !simubitmap) {
+				if (result[c1][i] + result[c1][i + 1] == 0)
+					candidate[(i / 4)] = c2;
+				else if (result[c2][i] + result[c2][i + 1] == 0)
+					candidate[(i / 4)] = c1;
+				else
+					simubitmap = simubitmap | (1 << i);
+			} else {
+				simubitmap = simubitmap | (1 << i);
+			}
+		}
+	}
+
+	if (simubitmap == 0) {
+		for (i = 0; i < (bound / 4); i++) {
+			if (candidate[i] >= 0) {
+				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+					result[3][j] = result[candidate[i]][j];
+				retval = false;
+			}
+		}
+		return retval;
+	} else if (!(simubitmap & 0x0f)) {
+		/* path A OK */
+		for (i = 0; i < 4; i++)
+			result[3][i] = result[c1][i];
+	} else if (!(simubitmap & 0xf0) && priv->tx_paths > 1) {
+		/* path B OK */
+		for (i = 4; i < 8; i++)
+			result[3][i] = result[c1][i];
+	}
+
+	return false;
+}
+
+static void
+rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
+{
+	int i;
+
+	for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		backup[i] = rtl8xxxu_read8(priv, reg[i]);
+
+	backup[i] = rtl8xxxu_read32(priv, reg[i]);
+}
+
+static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+				      const u32 *reg, u32 *backup)
+{
+	int i;
+
+	for (i = 0; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		rtl8xxxu_write8(priv, reg[i], backup[i]);
+
+	rtl8xxxu_write32(priv, reg[i], backup[i]);
+}
+
+static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+			       u32 *backup, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		backup[i] = rtl8xxxu_read32(priv, regs[i]);
+}
+
+static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+				  u32 *backup, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		rtl8xxxu_write32(priv, regs[i], backup[i]);
+}
+
+static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+				  bool path_a_on)
+{
+	u32 path_on;
+	int i;
+
+	path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4;
+	if (priv->tx_paths == 1) {
+		path_on = 0x0bdb25a0;
+		rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0);
+	} else {
+		rtl8xxxu_write32(priv, regs[0], path_on);
+	}
+
+	for (i = 1; i < RTL8XXXU_ADDA_REGS; i++)
+		rtl8xxxu_write32(priv, regs[i], path_on);
+}
+
+static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+				     const u32 *regs, u32 *backup)
+{
+	int i = 0;
+
+	rtl8xxxu_write8(priv, regs[i], 0x3f);
+
+	for (i = 1; i < (RTL8XXXU_MAC_REGS - 1); i++)
+		rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(3)));
+
+	rtl8xxxu_write8(priv, regs[i], (u8)(backup[i] & ~BIT(5)));
+}
+
+static int rtl8xxxu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_e94, reg_e9c, reg_ea4, val32;
+	int result = 0;
+
+	/* path-A IQK setting */
+	rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1f);
+	rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1f);
+	rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140102);
+
+	val32 = (priv->rf_paths > 1) ? 0x28160202 :
+		/*IS_81xxC_VENDOR_UMC_B_CUT(pHalData->VersionID)?0x28160202: */
+		0x28160502;
+	rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, val32);
+
+	/* path-B IQK setting */
+	if (priv->rf_paths > 1) {
+		rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x10008c22);
+		rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x10008c22);
+		rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82140102);
+		rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28160202);
+	}
+
+	/* LO calibration setting */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x001028d1);
+
+	/* One shot, path A LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+	mdelay(1);
+
+	/* Check for failure */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+	reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+	reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+	if (!(reg_eac & BIT(28)) &&
+	    ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_e9c & 0x03ff0000) != 0x00420000))
+		result |= 0x01;
+	else	/* If TX not OK, ignore RX */
+		goto out;
+
+	/* If TX is OK, check whether RX is OK */
+	if (!(reg_eac & BIT(27)) &&
+	    ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+	    ((reg_eac & 0x03ff0000) != 0x00360000))
+		result |= 0x02;
+	else
+		dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+			 __func__);
+out:
+	return result;
+}
+
+static int rtl8xxxu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	int result = 0;
+
+	/* One shot, path B LOK & IQK */
+	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
+	rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
+
+	mdelay(1);
+
+	/* Check for failure */
+	reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+	reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+	reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+	reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+	reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+	if (!(reg_eac & BIT(31)) &&
+	    ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+	    ((reg_ebc & 0x03ff0000) != 0x00420000))
+		result |= 0x01;
+	else
+		goto out;
+
+	if (!(reg_eac & BIT(30)) &&
+	    (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
+	    (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
+		result |= 0x02;
+	else
+		dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+			 __func__);
+out:
+	return result;
+}
+
+static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+				     int result[][8], int t)
+{
+	struct device *dev = &priv->udev->dev;
+	u32 i, val32;
+	int path_a_ok, path_b_ok;
+	int retry = 2;
+	const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+		REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+		REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+		REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+		REG_TX_OFDM_BBON, REG_TX_TO_RX,
+		REG_TX_TO_TX, REG_RX_CCK,
+		REG_RX_OFDM, REG_RX_WAIT_RIFS,
+		REG_RX_TO_RX, REG_STANDBY,
+		REG_SLEEP, REG_PMPD_ANAEN
+	};
+	const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+		REG_TXPAUSE, REG_BEACON_CTRL,
+		REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+	};
+	const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+		REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+		REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+		REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+		REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+	};
+
+	/*
+	 * Note: IQ calibration must be performed after loading
+	 *       PHY_REG.txt, radio_a.txt and radio_b.txt
+	 */
+
+	if (t == 0) {
+		/* Save ADDA parameters, turn Path A ADDA on */
+		rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+				   RTL8XXXU_ADDA_REGS);
+		rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+		rtl8xxxu_save_regs(priv, iqk_bb_regs,
+				   priv->bb_backup, RTL8XXXU_BB_REGS);
+	}
+
+	rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+	if (t == 0) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+		if (val32 & FPGA0_HSSI_PARM1_PI)
+			priv->pi_enabled = 1;
+	}
+
+	if (!priv->pi_enabled) {
+		/* Switch BB to PI mode to do IQ Calibration. */
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+		rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 &= ~FPGA_RF_MODE_CCK;
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+	rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+	rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+	val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+	val32 &= ~BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+	val32 &= ~BIT(10);
+	rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+	if (priv->tx_paths > 1) {
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+		rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM, 0x00010000);
+	}
+
+	/* MAC settings */
+	rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+	/* Page B init */
+	rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x00080000);
+
+	if (priv->tx_paths > 1)
+		rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x00080000);
+
+	/* IQ calibration setting */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+	rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+	rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	for (i = 0; i < retry; i++) {
+		path_a_ok = rtl8xxxu_iqk_path_a(priv);
+		if (path_a_ok == 0x03) {
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_A);
+			result[t][0] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_A);
+			result[t][1] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_BEFORE_IQK_A_2);
+			result[t][2] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_RX_POWER_AFTER_IQK_A_2);
+			result[t][3] = (val32 >> 16) & 0x3ff;
+			break;
+		} else if (i == (retry - 1) && path_a_ok == 0x01) {
+			/* TX IQK OK */
+			dev_dbg(dev, "%s: Path A IQK Only Tx Success!!\n",
+				__func__);
+
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_A);
+			result[t][0] = (val32 >> 16) & 0x3ff;
+			val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_A);
+			result[t][1] = (val32 >> 16) & 0x3ff;
+		}
+	}
+
+	if (!path_a_ok)
+		dev_dbg(dev, "%s: Path A IQK failed!\n", __func__);
+
+	if (priv->tx_paths > 1) {
+		/*
+		 * Path A into standby
+		 */
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x0);
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00010000);
+		rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+		/* Turn Path B ADDA on */
+		rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+		for (i = 0; i < retry; i++) {
+			path_b_ok = rtl8xxxu_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+				result[t][6] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+				result[t][7] = (val32 >> 16) & 0x3ff;
+				break;
+			} else if (i == (retry - 1) && path_b_ok == 0x01) {
+				/* TX IQK OK */
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+			}
+		}
+
+		if (!path_b_ok)
+			dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+	}
+
+	/* Back to BB mode, load original value */
+	rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0);
+
+	if (t) {
+		if (!priv->pi_enabled) {
+			/*
+			 * Switch back BB to SI mode after finishing
+			 * IQ Calibration
+			 */
+			val32 = 0x01000000;
+			rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, val32);
+			rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, val32);
+		}
+
+		/* Reload ADDA power saving parameters */
+		rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+				      RTL8XXXU_ADDA_REGS);
+
+		/* Reload MAC parameters */
+		rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+		/* Reload BB parameters */
+		rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+				      priv->bb_backup, RTL8XXXU_BB_REGS);
+
+		/* Restore RX initial gain */
+		rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+		if (priv->tx_paths > 1) {
+			rtl8xxxu_write32(priv, REG_FPGA0_XB_LSSI_PARM,
+					 0x00032ed3);
+		}
+
+		/* Load 0xe30 IQC default value */
+		rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+		rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+	}
+}
+
+static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+	struct device *dev = &priv->udev->dev;
+	int result[4][8];	/* last is final result */
+	int i, candidate;
+	bool path_a_ok, path_b_ok;
+	u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+	u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+	s32 reg_tmp = 0;
+	bool simu;
+
+	memset(result, 0, sizeof(result));
+	candidate = -1;
+
+	path_a_ok = false;
+	path_b_ok = false;
+
+	rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+
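+	/*
+	 * Run the calibration up to three times, accepting a result as
+	 * soon as two runs agree within tolerance.
+	 */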
+	for (i = 0; i < 3; i++) {
+		rtl8xxxu_phy_iqcalibrate(priv, result, i);
+
+		if (i == 1) {
+			simu = rtl8xxxu_simularity_compare(priv, result, 0, 1);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+		}
+
+		if (i == 2) {
+			simu = rtl8xxxu_simularity_compare(priv, result, 0, 2);
+			if (simu) {
+				candidate = 0;
+				break;
+			}
+
+			simu = rtl8xxxu_simularity_compare(priv, result, 1, 2);
+			if (simu) {
+				candidate = 1;
+			} else {
+				for (i = 0; i < 8; i++)
+					reg_tmp += result[3][i];
+
+				if (reg_tmp)
+					candidate = 3;
+				else
+					candidate = -1;
+			}
+		}
+	}
+
+	for (i = 0; i < 4; i++) {
+		reg_e94 = result[i][0];
+		reg_e9c = result[i][1];
+		reg_ea4 = result[i][2];
+		reg_eac = result[i][3];
+		reg_eb4 = result[i][4];
+		reg_ebc = result[i][5];
+		reg_ec4 = result[i][6];
+		reg_ecc = result[i][7];
+	}
+
+	if (candidate >= 0) {
+		reg_e94 = result[candidate][0];
+		priv->rege94 =  reg_e94;
+		reg_e9c = result[candidate][1];
+		priv->rege9c = reg_e9c;
+		reg_ea4 = result[candidate][2];
+		reg_eac = result[candidate][3];
+		reg_eb4 = result[candidate][4];
+		priv->regeb4 = reg_eb4;
+		reg_ebc = result[candidate][5];
+		priv->regebc = reg_ebc;
+		reg_ec4 = result[candidate][6];
+		reg_ecc = result[candidate][7];
+		dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+		dev_dbg(dev,
+			"%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+			"ecc=%x\n ", __func__, reg_e94, reg_e9c,
+			reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+		path_a_ok = true;
+		path_b_ok = true;
+	} else {
+		reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+		reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+	}
+
+	if (reg_e94 && candidate >= 0)
+		rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+					   candidate, (reg_ea4 == 0));
+
+	if (priv->tx_paths > 1 && reg_eb4)
+		rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+					   candidate, (reg_ec4 == 0));
+
+	rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+			   priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
+{
+	u32 val32;
+	u32 rf_amode, rf_bmode = 0, lstf;
+
+	/* Check continuous TX and Packet TX */
+	lstf = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+
+	if (lstf & OFDM_LSTF_MASK) {
+		/* Disable all continuous TX */
+		val32 = lstf & ~OFDM_LSTF_MASK;
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+		/* Read original RF mode Path A */
+		rf_amode = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_AC);
+
+		/* Set RF mode to standby Path A */
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC,
+				     (rf_amode & 0x8ffff) | 0x10000);
+
+		/* Path-B */
+		if (priv->tx_paths > 1) {
+			rf_bmode = rtl8xxxu_read_rfreg(priv, RF_B,
+						       RF6052_REG_AC);
+
+			rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+					     (rf_bmode & 0x8ffff) | 0x10000);
+		}
+	} else {
+		/*  Deal with Packet TX case */
+		/*  block all queues */
+		rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+	}
+
+	/* Start LC calibration */
+	val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG);
+	val32 |= 0x08000;
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32);
+
+	msleep(100);
+
+	/* Restore original parameters */
+	if (lstf & OFDM_LSTF_MASK) {
+		/* Path-A */
+		rtl8xxxu_write32(priv, REG_OFDM1_LSTF, lstf);
+		rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, rf_amode);
+
+		/* Path-B */
+		if (priv->tx_paths > 1)
+			rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC,
+					     rf_bmode);
+	} else /*  Deal with Packet TX case */
+		rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static int rtl8xxxu_set_mac(struct rtl8xxxu_priv *priv)
+{
+	int i;
+	u16 reg;
+
+	reg = REG_MACID;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl8xxxu_write8(priv, reg + i, priv->mac_addr[i]);
+
+	return 0;
+}
+
+static int rtl8xxxu_set_bssid(struct rtl8xxxu_priv *priv, const u8 *bssid)
+{
+	int i;
+	u16 reg;
+
+	dev_dbg(&priv->udev->dev, "%s: (%pM)\n", __func__, bssid);
+
+	reg = REG_BSSID;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		rtl8xxxu_write8(priv, reg + i, bssid[i]);
+
+	return 0;
+}
+
+static void
+rtl8xxxu_set_ampdu_factor(struct rtl8xxxu_priv *priv, u8 ampdu_factor)
+{
+	u8 vals[4] = { 0x41, 0xa8, 0x72, 0xb9 };
+	u8 max_agg = 0xf;
+	int i;
+
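+	/*
+	 * Each REG_AGGLEN_LMT byte holds two 4 bit aggregation limits -
+	 * clamp both nibbles to the requested factor.
+	 */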
+	ampdu_factor = 1 << (ampdu_factor + 2);
+	if (ampdu_factor > max_agg)
+		ampdu_factor = max_agg;
+
+	for (i = 0; i < 4; i++) {
+		if ((vals[i] & 0xf0) > (ampdu_factor << 4))
+			vals[i] = (vals[i] & 0x0f) | (ampdu_factor << 4);
+
+		if ((vals[i] & 0x0f) > ampdu_factor)
+			vals[i] = (vals[i] & 0xf0) | ampdu_factor;
+
+		rtl8xxxu_write8(priv, REG_AGGLEN_LMT + i, vals[i]);
+	}
+}
+
+static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
+{
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_AMPDU_MIN_SPACE);
+	val8 &= 0xf8;
+	val8 |= density;
+	rtl8xxxu_write8(priv, REG_AMPDU_MIN_SPACE, val8);
+}
+
+static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	int count, ret = 0;
+
+	/* Start of rtl8723AU_card_enable_flow */
+	/* Act to Cardemu sequence */
+	/* Turn off RF */
+	rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+	/* 0x004E[7] = 0, switch DPDT_SEL_P output from register 0x0065[2] */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 &= ~LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+	/* 0x0005[1] = 1 turn off MAC by HW state machine */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 |= BIT(1);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+		if ((val8 & BIT(1)) == 0)
+			break;
+		udelay(10);
+	}
+
+	if (!count) {
+		dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+			 __func__);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+	val8 |= SYS_ISO_ANALOG_IPS;
+	rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* 0x0020[0] = 0 disable LDOA12 MACRO block */
+	val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+	val8 &= ~LDOA15_ENABLE;
+	rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+	int count, ret = 0;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	/*
+	 * Poll - wait for RX packet to complete
+	 */
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, 0x5f8);
+		if (!val32)
+			break;
+		udelay(10);
+	}
+
+	if (!count) {
+		dev_warn(&priv->udev->dev,
+			 "%s: RX poll timed out (0x05f8)\n", __func__);
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* Disable CCK and OFDM, clock gated */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 &= ~SYS_FUNC_BBRSTB;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	udelay(2);
+
+	/* Reset baseband */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+	val8 &= ~SYS_FUNC_BB_GLB_RSTN;
+	rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+	/* Reset MAC TRX */
+	val8 = CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE;
+	rtl8xxxu_write8(priv, REG_CR, val8);
+
+	/* Disable the security engine */
+	val8 = rtl8xxxu_read8(priv, REG_CR + 1);
+	val8 &= ~BIT(1); /* CR_SECURITY_ENABLE */
+	rtl8xxxu_write8(priv, REG_CR + 1, val8);
+
+	/* Respond TX OK to scheduler */
+	val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+	val8 |= DUAL_TSF_TX_OK;
+	rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+exit:
+	return ret;
+}
+
+static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	/* Clear suspend enable and power down enable */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(7));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* 0x48[16] = 0 to disable GPIO9 as EXT WAKEUP */
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+	/* 0x04[12:11] = 11 enable WL suspend */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(4));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u32 val32;
+	int count, ret = 0;
+
+	/* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+	val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+	val8 |= LDOA15_ENABLE;
+	rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/* 0x67[0] = 0 to disable BT_GPS_SEL pins */
+	val8 = rtl8xxxu_read8(priv, 0x0067);
+	val8 &= ~BIT(4);
+	rtl8xxxu_write8(priv, 0x0067, val8);
+
+	mdelay(1);
+
+	/* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+	val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+	val8 &= ~SYS_ISO_ANALOG_IPS;
+	rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* disable SW LPS 0x04[10] = 0 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(2);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1 power ready */
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+		if (val32 & BIT(17))
+			break;
+
+		udelay(10);
+	}
+
+	if (!count) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* We should be able to optimize the following three entries into one */
+
+	/* release WLON reset 0x04[16] = 1 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/* disable HWPDN 0x04[15] = 0 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(7);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable WL suspend */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~(BIT(3) | BIT(4));
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* set, then poll until 0 */
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+	val32 |= APS_FSMCO_MAC_ENABLE;
+	rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+		val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+		if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+			ret = 0;
+			break;
+		}
+		udelay(10);
+	}
+
+	if (!count) {
+		ret = -EBUSY;
+		goto exit;
+	}
+
+	/* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+	/*
+	 * Note: Vendor driver actually clears this bit, despite the
+	 * documentation claims it's being set!
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 |= LEDCFG2_DPDT_SELECT;
+	val8 &= ~LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+
+	/* 0x0007[7:0] = 0x20 SOP option to disable BG/MB */
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x20);
+
+	/* 0x04[12:11] = 01 enable WL suspend */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 &= ~BIT(4);
+	val8 |= BIT(3);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+	val8 |= BIT(7);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+	val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+
+	return 0;
+}
+
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int ret;
+
+	/*
+	 * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+	 */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+	rtl8xxxu_disabled_to_emu(priv);
+
+	ret = rtl8xxxu_emu_to_active(priv);
+	if (ret)
+		goto exit;
+
+	/*
+	 * 0x0004[19] = 1, reset 8051
+	 */
+	val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+	val8 |= BIT(3);
+	rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/*
+	 * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+	 * Set CR bit10 to enable 32k calibration.
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+		  CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+		  CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+		  CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+		  CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	/* For EFuse PG */
+	val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+	val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+	val32 |= (0x06 << 28);
+	rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+	return ret;
+}
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int i;
+
+	for (i = 100; i; i--) {
+		val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+		if (val8 & APS_FSMCO_PFM_ALDN)
+			break;
+	}
+
+	if (!i) {
+		pr_info("%s: Poll failed\n", __func__);
+		return -ENODEV;
+	}
+
+	/*
+	 * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+	 */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+	rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+	udelay(100);
+
+	val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+	if (!(val8 & LDOV12D_ENABLE)) {
+		pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+		val8 |= LDOV12D_ENABLE;
+		rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+		udelay(100);
+
+		val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+		val8 &= ~SYS_ISO_MD2PP;
+		rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+	}
+
+	/*
+	 * Auto enable WLAN
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+	val16 |= APS_FSMCO_MAC_ENABLE;
+	rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+	for (i = 1000; i; i--) {
+		val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+		if (!(val16 & APS_FSMCO_MAC_ENABLE))
+			break;
+	}
+	if (!i) {
+		pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * Enable radio, GPIO, LED
+	 */
+	val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+		APS_FSMCO_PFM_ALDN;
+	rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+	/*
+	 * Release RF digital isolation
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+	val16 &= ~SYS_ISO_DIOR;
+	rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+	val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+	val8 &= ~APSD_CTRL_OFF;
+	rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+	for (i = 200; i; i--) {
+		val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+		if (!(val8 & APSD_CTRL_OFF_STATUS))
+			break;
+	}
+
+	if (!i) {
+		pr_info("%s: APSD_CTRL poll failed\n", __func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+	 */
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+		CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+		CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	/*
+	 * Workaround for 8188RU LNA power leakage problem.
+	 */
+	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+		val32 &= ~BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+	}
+	return 0;
+}
+
+#endif
+
+static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+{
+	u8 val8;
+	u16 val16;
+	u32 val32;
+
+	/*
+	 * Workaround for 8188RU LNA power leakage problem.
+	 */
+	if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+		val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+		val32 |= BIT(1);
+		rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+	}
+
+	rtl8xxxu_active_to_lps(priv);
+
+	/* Turn off RF */
+	rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+	/* Reset Firmware if running in RAM */
+	if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+		rtl8xxxu_firmware_self_reset(priv);
+
+	/* Reset MCU */
+	val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+	val16 &= ~SYS_FUNC_CPU_ENABLE;
+	rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+	/* Reset MCU ready status */
+	rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+	rtl8xxxu_active_to_emu(priv);
+	rtl8xxxu_emu_to_disabled(priv);
+
+	/* Reset MCU IO Wrapper */
+	val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+	val8 &= ~BIT(0);
+	rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+	val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+	val8 |= BIT(0);
+	rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+	/* RSV_CTRL 0x1C[7:0] = 0x0e  lock ISO/CLK/Power control register */
+	rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
+}
+
+static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv)
+{
+	if (!priv->has_bluetooth)
+		return;
+}
+
+static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	struct rtl8xxxu_rfregval *rftable;
+	bool macpower;
+	int ret;
+	u8 val8;
+	u16 val16;
+	u32 val32;
+
+	/* Check if MAC is already powered on */
+	val8 = rtl8xxxu_read8(priv, REG_CR);
+
+	/*
+	 * Fix the 92DU-VC S3 hang caused by the secondary MAC not being
+	 * initialized. The first MAC returns 0xea, the second returns 0x00.
+	 */
+	if (val8 == 0xea)
+		macpower = false;
+	else
+		macpower = true;
+
+	ret = priv->fops->power_on(priv);
+	if (ret < 0) {
+		dev_warn(dev, "%s: Failed power on\n", __func__);
+		goto exit;
+	}
+
+	dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+	if (!macpower) {
+		ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM);
+		if (ret) {
+			dev_warn(dev, "%s: LLT table init failed\n", __func__);
+			goto exit;
+		}
+	}
+
+	ret = rtl8xxxu_download_firmware(priv);
+	dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+	ret = rtl8xxxu_start_firmware(priv);
+	dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+	dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_init_phy_bb(priv);
+	dev_dbg(dev, "%s: init_phy_bb %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	switch (priv->rtlchip) {
+	case 0x8723a:
+		rftable = rtl8723au_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8188c:
+		if (priv->hi_pa)
+			rftable = rtl8188ru_radioa_1t_highpa_table;
+		else
+			rftable = rtl8192cu_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8191c:
+		rftable = rtl8192cu_radioa_1t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		break;
+	case 0x8192c:
+		rftable = rtl8192cu_radioa_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+		if (ret)
+			break;
+		rftable = rtl8192cu_radiob_2t_init_table;
+		ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret)
+		goto exit;
+
+	/* Reduce 80M spur */
+	rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+	rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+	/* RFSW Control - clear bit 14 ?? */
+	rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
+	/* 0x07000760 */
+	val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+		FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
+		((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
+		 FPGA0_RF_BD_CTRL_SHIFT);
+	rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+	/* 0x860[6:5]= 00 - why? - this sets antenna B */
+	rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+
+	priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
+						  RF6052_REG_MODE_AG);
+
+	dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+	if (!macpower) {
+		if (priv->ep_tx_normal_queue)
+			val8 = TX_PAGE_NUM_NORM_PQ;
+		else
+			val8 = 0;
+
+		rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
+
+		val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
+
+		if (priv->ep_tx_high_queue)
+			val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+		if (priv->ep_tx_low_queue)
+			val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
+
+		rtl8xxxu_write32(priv, REG_RQPN, val32);
+
+		/*
+		 * Set TX buffer boundary
+		 */
+		val8 = TX_TOTAL_PAGE_NUM + 1;
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
+		rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8);
+		rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8);
+		rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
+	}
+
+	ret = rtl8xxxu_init_queue_priority(priv);
+	dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+	if (ret)
+		goto exit;
+
+	/*
+	 * Set RX page boundary
+	 */
+	rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
+	/*
+	 * Transfer page size is always 128
+	 */
+	val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+		(PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+	rtl8xxxu_write8(priv, REG_PBP, val8);
+
+	/*
+	 * Units of 8 bytes, not obvious what it is used for
+	 */
+	rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
+
+	/*
+	 * Enable all interrupts - not obvious USB needs to do this
+	 */
+	rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+	rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+
+	rtl8xxxu_set_mac(priv);
+	rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
+
+	/*
+	 * Configure initial WMAC settings
+	 */
+	val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST |
+		/* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */
+		RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
+		RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
+	rtl8xxxu_write32(priv, REG_RCR, val32);
+
+	/*
+	 * Accept all multicast
+	 */
+	rtl8xxxu_write32(priv, REG_MAR, 0xffffffff);
+	rtl8xxxu_write32(priv, REG_MAR + 4, 0xffffffff);
+
+	/*
+	 * Init adaptive controls
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+	val32 |= RESPONSE_RATE_RRSR_CCK_ONLY_1M;
+	rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+	/* CCK = 0x0a, OFDM = 0x10 */
+	rtl8xxxu_set_spec_sifs(priv, 0x10, 0x10);
+	rtl8xxxu_set_retry(priv, 0x30, 0x30);
+	rtl8xxxu_set_spec_sifs(priv, 0x0a, 0x10);
+
+	/*
+	 * Init EDCA
+	 */
+	rtl8xxxu_write16(priv, REG_MAC_SPEC_SIFS, 0x100a);
+
+	/* Set CCK SIFS */
+	rtl8xxxu_write16(priv, REG_SIFS_CCK, 0x100a);
+
+	/* Set OFDM SIFS */
+	rtl8xxxu_write16(priv, REG_SIFS_OFDM, 0x100a);
+
+	/* TXOP */
+	rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, 0x005ea42b);
+	rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, 0x0000a44f);
+	rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, 0x005ea324);
+	rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, 0x002fa226);
+
+	/* Set data auto rate fallback retry count */
+	rtl8xxxu_write32(priv, REG_DARFRC, 0x00000000);
+	rtl8xxxu_write32(priv, REG_DARFRC + 4, 0x10080404);
+	rtl8xxxu_write32(priv, REG_RARFRC, 0x04030201);
+	rtl8xxxu_write32(priv, REG_RARFRC + 4, 0x08070605);
+
+	val8 = rtl8xxxu_read8(priv, REG_FWHW_TXQ_CTRL);
+	val8 |= FWHW_TXQ_CTRL_AMPDU_RETRY;
+	rtl8xxxu_write8(priv, REG_FWHW_TXQ_CTRL, val8);
+
+	/*  Set ACK timeout */
+	rtl8xxxu_write8(priv, REG_ACKTO, 0x40);
+
+	/*
+	 * Initialize beacon parameters
+	 */
+	val16 = BEACON_DISABLE_TSF_UPDATE | (BEACON_DISABLE_TSF_UPDATE << 8);
+	rtl8xxxu_write16(priv, REG_BEACON_CTRL, val16);
+	rtl8xxxu_write16(priv, REG_TBTT_PROHIBIT, 0x6404);
+	rtl8xxxu_write8(priv, REG_DRIVER_EARLY_INT, DRIVER_EARLY_INT_TIME);
+	rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME);
+	rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F);
+
+	/*
+	 * Enable CCK and OFDM block
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	val32 |= (FPGA_RF_MODE_CCK | FPGA_RF_MODE_OFDM);
+	rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+	/*
+	 * Invalidate all CAM entries - bit 30 is undocumented
+	 */
+	rtl8xxxu_write32(priv, REG_CAM_CMD, CAM_CMD_POLLING | BIT(30));
+
+	/*
+	 * Start out with default power levels for channel 6, 20MHz
+	 */
+	rtl8723a_set_tx_power(priv, 1, false);
+
+	/* Let the 8051 take control of antenna setting */
+	val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+	val8 |= LEDCFG2_DPDT_SELECT;
+	rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+	rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
+
+	/* Disable BAR - not sure if this has any effect on USB */
+	rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+	rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
+
+	/*
+	 * Not sure if we should get into this at all
+	 */
+	if (priv->iqk_initialized) {
+		rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+				      priv->bb_recovery_backup,
+				      RTL8XXXU_BB_REGS);
+	} else {
+		rtl8723a_phy_iq_calibrate(priv);
+		priv->iqk_initialized = true;
+	}
+
+	/*
+	 * This should enable thermal meter
+	 */
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60);
+
+	rtl8723a_phy_lc_calibrate(priv);
+
+	/* fix USB interface interference issue */
+	rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+	rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+	rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+
+	/* Solve too many protocol error on USB bus */
+	/* Can't do this for 8188/8192 UMC A cut parts */
+	rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+	rtl8xxxu_write8(priv, 0xfe41, 0x94);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+	rtl8xxxu_write8(priv, 0xfe41, 0x19);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+	rtl8xxxu_write8(priv, 0xfe41, 0x91);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+	rtl8xxxu_write8(priv, 0xfe41, 0x81);
+	rtl8xxxu_write8(priv, 0xfe42, 0x80);
+
+	/* Init BT hw config. */
+	rtl8xxxu_init_bt(priv);
+
+	/*
+	 * Not sure if we really need to save these parameters, but the
+	 * vendor driver does
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
+	if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)
+		priv->path_a_hi_power = 1;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+	priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK;
+
+	val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+	priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK;
+
+	/* Set NAV_UPPER to 30000us */
+	val8 = DIV_ROUND_UP(30000, NAV_UPPER_UNIT);
+	rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
+
+	/*
+	 * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
+	 * but we still need to find the root cause.
+	 */
+	val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+	if ((val32 & 0xff000000) != 0x83000000) {
+		val32 |= FPGA_RF_MODE_CCK;
+		rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+	}
+
+	val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
+	val32 |= FWHW_TXQ_CTRL_XMIT_MGMT_ACK;
+	/* ack for xmit mgmt frames. */
+	rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+
+exit:
+	return ret;
+}
+
+static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	rtl8xxxu_power_off(priv);
+}
+
+static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
+			       struct ieee80211_key_conf *key, const u8 *mac)
+{
+	u32 cmd, val32, addr, ctrl;
+	int j, i, tmp_debug;
+
+	tmp_debug = rtl8xxxu_debug;
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_KEY)
+		rtl8xxxu_debug |= RTL8XXXU_DEBUG_REG_WRITE;
+
+	/*
+	 * This is a bit of a hack - the lower bits of the cipher
+	 * suite selector happen to match the cipher index in the CAM
+	 */
+	addr = key->keyidx << CAM_CMD_KEY_SHIFT;
+	ctrl = (key->cipher & 0x0f) << 2 | key->keyidx | CAM_WRITE_VALID;
+
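+	/*
+	 * A CAM entry is written as six 32-bit words: word 0 carries the
+	 * control field plus the first two MAC address octets, word 1 the
+	 * remaining four octets, and words 2-5 the key material. Writing
+	 * back to front ensures the valid bit in word 0 is set last.
+	 */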
+	for (j = 5; j >= 0; j--) {
+		switch (j) {
+		case 0:
+			val32 = ctrl | (mac[0] << 16) | (mac[1] << 24);
+			break;
+		case 1:
+			val32 = mac[2] | (mac[3] << 8) |
+				(mac[4] << 16) | (mac[5] << 24);
+			break;
+		default:
+			i = (j - 2) << 2;
+			val32 = key->key[i] | (key->key[i + 1] << 8) |
+				key->key[i + 2] << 16 | key->key[i + 3] << 24;
+			break;
+		}
+
+		rtl8xxxu_write32(priv, REG_CAM_WRITE, val32);
+		cmd = CAM_CMD_POLLING | CAM_CMD_WRITE | (addr + j);
+		rtl8xxxu_write32(priv, REG_CAM_CMD, cmd);
+		udelay(100);
+	}
+
+	rtl8xxxu_debug = tmp_debug;
+}
+
+static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif, const u8 *mac)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+	val8 |= BEACON_DISABLE_TSF_UPDATE;
+	rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	u8 val8;
+
+	val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+	val8 &= ~BEACON_DISABLE_TSF_UPDATE;
+	rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+}
+
+static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+				      u32 ramask, int sgi)
+{
+	struct h2c_cmd h2c;
+
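+	/*
+	 * The 32-bit rate mask is passed to the firmware as two
+	 * little-endian 16-bit halves; bit 0x20 of arg requests short GI.
+	 */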
+	h2c.ramask.cmd = H2C_SET_RATE_MASK;
+	h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff);
+	h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16);
+
+	h2c.ramask.arg = 0x80;
+	if (sgi)
+		h2c.ramask.arg |= 0x20;
+
+	dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__,
+		ramask, h2c.ramask.arg);
+	rtl8723a_h2c_cmd(priv, &h2c);
+}
+
+static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
+{
+	u32 val32;
+	u8 rate_idx = 0;
+
+	rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
+
+	val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+	val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+	val32 |= rate_cfg;
+	rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+
+	dev_dbg(&priv->udev->dev, "%s: rates %08x\n", __func__,	rate_cfg);
+
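+	/* Select the highest basic rate as the initial RTS rate */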
+	while (rate_cfg) {
+		rate_cfg = (rate_cfg >> 1);
+		rate_idx++;
+	}
+	rtl8xxxu_write8(priv, REG_INIRTS_RATE_SEL, rate_idx);
+}
+
+static void
+rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct ieee80211_bss_conf *bss_conf, u32 changed)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	struct ieee80211_sta *sta;
+	u32 val32;
+	u8 val8;
+
+	if (changed & BSS_CHANGED_ASSOC) {
+		struct h2c_cmd h2c;
+
+		dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc);
+
+		memset(&h2c, 0, sizeof(struct h2c_cmd));
+		rtl8xxxu_set_linktype(priv, vif->type);
+
+		if (bss_conf->assoc) {
+			u32 ramask;
+			int sgi = 0;
+
+			rcu_read_lock();
+			sta = ieee80211_find_sta(vif, bss_conf->bssid);
+			if (!sta) {
+				dev_info(dev, "%s: ASSOC no sta found\n",
+					 __func__);
+				rcu_read_unlock();
+				goto error;
+			}
+
+			if (sta->ht_cap.ht_supported)
+				dev_info(dev, "%s: HT supported\n", __func__);
+			if (sta->vht_cap.vht_supported)
+				dev_info(dev, "%s: VHT supported\n", __func__);
+
+			/* TODO: Set bits 28-31 for rate adaptive id */
+			ramask = (sta->supp_rates[0] & 0xfff) |
+				sta->ht_cap.mcs.rx_mask[0] << 12 |
+				sta->ht_cap.mcs.rx_mask[1] << 20;
+			if (sta->ht_cap.cap &
+			    (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))
+				sgi = 1;
+			rcu_read_unlock();
+
+			rtl8xxxu_update_rate_mask(priv, ramask, sgi);
+
+			val32 = rtl8xxxu_read32(priv, REG_RCR);
+			val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON;
+			rtl8xxxu_write32(priv, REG_RCR, val32);
+
+			/* Enable RX of data frames */
+			rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
+
+			rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
+
+			rtl8723a_stop_tx_beacon(priv);
+
+			/* joinbss sequence */
+			rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
+					 0xc000 | bss_conf->aid);
+
+			h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
+		} else {
+			val32 = rtl8xxxu_read32(priv, REG_RCR);
+			val32 &= ~(RCR_CHECK_BSSID_MATCH |
+				   RCR_CHECK_BSSID_BEACON);
+			rtl8xxxu_write32(priv, REG_RCR, val32);
+
+			val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+			val8 |= BEACON_DISABLE_TSF_UPDATE;
+			rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+
+			/* Disable RX of data frames */
+			rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+			h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
+		}
+		h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
+		rtl8723a_h2c_cmd(priv, &h2c);
+	}
+
+	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+		dev_dbg(dev, "Changed ERP_PREAMBLE: Use short preamble %i\n",
+			bss_conf->use_short_preamble);
+		val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+		if (bss_conf->use_short_preamble)
+			val32 |= RSR_ACK_SHORT_PREAMBLE;
+		else
+			val32 &= ~RSR_ACK_SHORT_PREAMBLE;
+		rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
+	}
+
+	if (changed & BSS_CHANGED_ERP_SLOT) {
+		dev_dbg(dev, "Changed ERP_SLOT: short_slot_time %i\n",
+			bss_conf->use_short_slot);
+
+		if (bss_conf->use_short_slot)
+			val8 = 9;
+		else
+			val8 = 20;
+		rtl8xxxu_write8(priv, REG_SLOT, val8);
+	}
+
+	if (changed & BSS_CHANGED_BSSID) {
+		dev_dbg(dev, "Changed BSSID!\n");
+		rtl8xxxu_set_bssid(priv, bss_conf->bssid);
+	}
+
+	if (changed & BSS_CHANGED_BASIC_RATES) {
+		dev_dbg(dev, "Changed BASIC_RATES!\n");
+		rtl8xxxu_set_basic_rates(priv, bss_conf->basic_rates);
+	}
+error:
+	return;
+}
+
+static u32 rtl8xxxu_80211_to_rtl_queue(u32 queue)
+{
+	u32 rtlqueue;
+
+	switch (queue) {
+	case IEEE80211_AC_VO:
+		rtlqueue = TXDESC_QUEUE_VO;
+		break;
+	case IEEE80211_AC_VI:
+		rtlqueue = TXDESC_QUEUE_VI;
+		break;
+	case IEEE80211_AC_BE:
+		rtlqueue = TXDESC_QUEUE_BE;
+		break;
+	case IEEE80211_AC_BK:
+		rtlqueue = TXDESC_QUEUE_BK;
+		break;
+	default:
+		rtlqueue = TXDESC_QUEUE_BE;
+	}
+
+	return rtlqueue;
+}
+
+static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	u32 queue;
+
+	if (ieee80211_is_mgmt(hdr->frame_control))
+		queue = TXDESC_QUEUE_MGNT;
+	else
+		queue = rtl8xxxu_80211_to_rtl_queue(skb_get_queue_mapping(skb));
+
+	return queue;
+}
+
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc)
+{
+	__le16 *ptr = (__le16 *)tx_desc;
+	u16 csum = 0;
+	int i;
+
+	/*
+	 * Clear csum field before calculation, as the csum field is
+	 * in the middle of the struct.
+	 */
+	tx_desc->csum = cpu_to_le16(0);
+
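+	/* The checksum is the XOR of all 16-bit words in the descriptor */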
+	for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++)
+		csum ^= le16_to_cpu(ptr[i]);
+
+	tx_desc->csum |= cpu_to_le16(csum);
+}
+
+static void rtl8xxxu_free_tx_resources(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_tx_urb *tx_urb, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+	list_for_each_entry_safe(tx_urb, tmp, &priv->tx_urb_free_list, list) {
+		list_del(&tx_urb->list);
+		priv->tx_urb_free_count--;
+		usb_free_urb(&tx_urb->urb);
+	}
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static struct rtl8xxxu_tx_urb *
+rtl8xxxu_alloc_tx_urb(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_tx_urb *tx_urb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+	tx_urb = list_first_entry_or_null(&priv->tx_urb_free_list,
+					  struct rtl8xxxu_tx_urb, list);
+	if (tx_urb) {
+		list_del(&tx_urb->list);
+		priv->tx_urb_free_count--;
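+		/*
+		 * Throttle the mac80211 queues while the free URB pool is
+		 * nearly exhausted; rtl8xxxu_free_tx_urb() wakes them again
+		 * once the count rises above the high water mark.
+		 */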
+		if (priv->tx_urb_free_count < RTL8XXXU_TX_URB_LOW_WATER &&
+		    !priv->tx_stopped) {
+			priv->tx_stopped = true;
+			ieee80211_stop_queues(priv->hw);
+		}
+	}
+
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+
+	return tx_urb;
+}
+
+static void rtl8xxxu_free_tx_urb(struct rtl8xxxu_priv *priv,
+				 struct rtl8xxxu_tx_urb *tx_urb)
+{
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&tx_urb->list);
+
+	spin_lock_irqsave(&priv->tx_urb_lock, flags);
+
+	list_add(&tx_urb->list, &priv->tx_urb_free_list);
+	priv->tx_urb_free_count++;
+	if (priv->tx_urb_free_count > RTL8XXXU_TX_URB_HIGH_WATER &&
+	    priv->tx_stopped) {
+		priv->tx_stopped = false;
+		ieee80211_wake_queues(priv->hw);
+	}
+
+	spin_unlock_irqrestore(&priv->tx_urb_lock, flags);
+}
+
+static void rtl8xxxu_tx_complete(struct urb *urb)
+{
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct ieee80211_tx_info *tx_info;
+	struct ieee80211_hw *hw;
+	struct rtl8xxxu_tx_urb *tx_urb =
+		container_of(urb, struct rtl8xxxu_tx_urb, urb);
+
+	tx_info = IEEE80211_SKB_CB(skb);
+	hw = tx_info->rate_driver_data[0];
+
+	skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+	ieee80211_tx_info_clear_status(tx_info);
+	tx_info->status.rates[0].idx = -1;
+	tx_info->status.rates[0].count = 0;
+
+	if (!urb->status)
+		tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+	ieee80211_tx_status_irqsafe(hw, skb);
+
+	rtl8xxxu_free_tx_urb(hw->priv, tx_urb);
+}
+
+static void rtl8xxxu_dump_action(struct device *dev,
+				 struct ieee80211_hdr *hdr)
+{
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)hdr;
+	u16 cap, timeout;
+
+	if (!(rtl8xxxu_debug & RTL8XXXU_DEBUG_ACTION))
+		return;
+
+	switch (mgmt->u.action.u.addba_resp.action_code) {
+	case WLAN_ACTION_ADDBA_RESP:
+		cap = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+		timeout = le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
+		dev_info(dev, "WLAN_ACTION_ADDBA_RESP: "
+			 "timeout %i, tid %02x, buf_size %02x, policy %02x, "
+			 "status %02x\n",
+			 timeout,
+			 (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+			 (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+			 (cap >> 1) & 0x1,
+			 le16_to_cpu(mgmt->u.action.u.addba_resp.status));
+		break;
+	case WLAN_ACTION_ADDBA_REQ:
+		cap = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+		timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+		dev_info(dev, "WLAN_ACTION_ADDBA_REQ: "
+			 "timeout %i, tid %02x, buf_size %02x, policy %02x\n",
+			 timeout,
+			 (cap & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2,
+			 (cap & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
+			 (cap >> 1) & 0x1);
+		break;
+	default:
+		dev_info(dev, "action frame %02x\n",
+			 mgmt->u.action.u.addba_resp.action_code);
+		break;
+	}
+}
+
+static void rtl8xxxu_tx(struct ieee80211_hw *hw,
+			struct ieee80211_tx_control *control,
+			struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct rtl8xxxu_tx_desc *tx_desc;
+	struct rtl8xxxu_tx_urb *tx_urb;
+	struct ieee80211_sta *sta = NULL;
+	struct ieee80211_vif *vif = tx_info->control.vif;
+	struct device *dev = &priv->udev->dev;
+	u32 queue, rate;
+	u16 pktlen = skb->len;
+	u16 seq_number;
+	u16 rate_flag = tx_info->control.rates[0].flags;
+	int ret;
+
+	if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) {
+		dev_warn(dev,
+			 "%s: Not enough headroom (%i) for tx descriptor\n",
+			 __func__, skb_headroom(skb));
+		goto error;
+	}
+
+	if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) {
+		dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n",
+			 __func__, skb->len);
+		goto error;
+	}
+
+	tx_urb = rtl8xxxu_alloc_tx_urb(priv);
+	if (!tx_urb) {
+		dev_warn(dev, "%s: Unable to allocate tx urb\n", __func__);
+		goto error;
+	}
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+		dev_info(dev, "%s: TX rate: %d (%d), pkt size %d\n",
+			 __func__, tx_rate->bitrate, tx_rate->hw_value, pktlen);
+
+	if (ieee80211_is_action(hdr->frame_control))
+		rtl8xxxu_dump_action(dev, hdr);
+
+	tx_info->rate_driver_data[0] = hw;
+
+	if (control && control->sta)
+		sta = control->sta;
+
+	tx_desc = (struct rtl8xxxu_tx_desc *)
+		skb_push(skb, sizeof(struct rtl8xxxu_tx_desc));
+
+	memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc));
+	tx_desc->pkt_size = cpu_to_le16(pktlen);
+	tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc);
+
+	tx_desc->txdw0 =
+		TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT;
+	if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+	    is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+		tx_desc->txdw0 |= TXDESC_BROADMULTICAST;
+
+	queue = rtl8xxxu_queue_select(hw, skb);
+	tx_desc->txdw1 = cpu_to_le32(queue << TXDESC_QUEUE_SHIFT);
+
+	if (tx_info->control.hw_key) {
+		switch (tx_info->control.hw_key->cipher) {
+		case WLAN_CIPHER_SUITE_WEP40:
+		case WLAN_CIPHER_SUITE_WEP104:
+		case WLAN_CIPHER_SUITE_TKIP:
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_RC4);
+			break;
+		case WLAN_CIPHER_SUITE_CCMP:
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_SEC_AES);
+			break;
+		default:
+			break;
+		}
+	}
+
+	seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT);
+
+	if (rate_flag & IEEE80211_TX_RC_MCS)
+		rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+	else
+		rate = tx_rate->hw_value;
+	tx_desc->txdw5 = cpu_to_le32(rate);
+
+	if (ieee80211_is_data(hdr->frame_control))
+		tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+	/* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */
+	if (ieee80211_is_data_qos(hdr->frame_control) && sta) {
+		if (sta->ht_cap.ht_supported) {
+			u32 ampdu, val32;
+
+			ampdu = (u32)sta->ht_cap.ampdu_density;
+			val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT;
+			tx_desc->txdw2 |= cpu_to_le32(val32);
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE);
+		} else {
+			tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+		}
+	} else {
+		tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK);
+	}
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS);
+	if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+	    (sta && vif && vif->bss_conf.use_short_preamble))
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE);
+	if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+	    (ieee80211_is_data_qos(hdr->frame_control) &&
+	     sta && sta->ht_cap.cap &
+	     (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
+		tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+	}
+	if (ieee80211_is_mgmt(hdr->frame_control)) {
+		tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE);
+		tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT);
+		tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE);
+	}
+
+	if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+		/* Use RTS rate 24M - does mac80211 tell us which to use? */
+		tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE);
+		tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE);
+	}
+
+	rtl8xxxu_calc_tx_desc_csum(tx_desc);
+
+	usb_fill_bulk_urb(&tx_urb->urb, priv->udev, priv->pipe_out[queue],
+			  skb->data, skb->len, rtl8xxxu_tx_complete, skb);
+
+	usb_anchor_urb(&tx_urb->urb, &priv->tx_anchor);
+	ret = usb_submit_urb(&tx_urb->urb, GFP_ATOMIC);
+	if (ret) {
+		usb_unanchor_urb(&tx_urb->urb);
+		rtl8xxxu_free_tx_urb(priv, tx_urb);
+		goto error;
+	}
+	return;
+error:
+	dev_kfree_skb(skb);
+}
+
+static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv,
+				       struct ieee80211_rx_status *rx_status,
+				       struct rtl8xxxu_rx_desc *rx_desc,
+				       struct rtl8723au_phy_stats *phy_stats)
+{
+	if (phy_stats->sgi_en)
+		rx_status->flag |= RX_FLAG_SHORT_GI;
+
+	if (rx_desc->rxmcs < DESC_RATE_6M) {
+		/*
+		 * Handle PHY stats for CCK rates
+		 */
+		u8 cck_agc_rpt = phy_stats->cck_agc_rpt_ofdm_cfosho_a;
+
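+		/*
+		 * The top two bits of the AGC report select the gain range,
+		 * the low bits give the level within that range (mapping
+		 * apparently inherited from the vendor driver).
+		 */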
+		switch (cck_agc_rpt & 0xc0) {
+		case 0xc0:
+			rx_status->signal = -46 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x80:
+			rx_status->signal = -26 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x40:
+			rx_status->signal = -12 - (cck_agc_rpt & 0x3e);
+			break;
+		case 0x00:
+			rx_status->signal = 16 - (cck_agc_rpt & 0x3e);
+			break;
+		}
+	} else {
+		rx_status->signal =
+			(phy_stats->cck_sig_qual_ofdm_pwdb_all >> 1) - 110;
+	}
+}
+
+static void rtl8xxxu_free_rx_resources(struct rtl8xxxu_priv *priv)
+{
+	struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	list_for_each_entry_safe(rx_urb, tmp,
+				 &priv->rx_urb_pending_list, list) {
+		list_del(&rx_urb->list);
+		priv->rx_urb_pending_count--;
+		usb_free_urb(&rx_urb->urb);
+	}
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+}
+
+static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	int pending = 0;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	if (!priv->shutdown) {
+		list_add_tail(&rx_urb->list, &priv->rx_urb_pending_list);
+		priv->rx_urb_pending_count++;
+		pending = priv->rx_urb_pending_count;
+	} else {
+		skb = (struct sk_buff *)rx_urb->urb.context;
+		dev_kfree_skb(skb);
+		usb_free_urb(&rx_urb->urb);
+	}
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
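+	/*
+	 * Requeuing is deferred to a workqueue, since
+	 * rtl8xxxu_submit_rx_urb() allocates its skb with GFP_KERNEL and
+	 * this function may be called from the URB completion handler.
+	 */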
+	if (pending > RTL8XXXU_RX_URB_PENDING_WATER)
+		schedule_work(&priv->rx_urb_wq);
+}
+
+static void rtl8xxxu_rx_urb_work(struct work_struct *work)
+{
+	struct rtl8xxxu_priv *priv;
+	struct rtl8xxxu_rx_urb *rx_urb, *tmp;
+	struct list_head local;
+	struct sk_buff *skb;
+	unsigned long flags;
+	int ret;
+
+	priv = container_of(work, struct rtl8xxxu_priv, rx_urb_wq);
+	INIT_LIST_HEAD(&local);
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+
+	list_splice_init(&priv->rx_urb_pending_list, &local);
+	priv->rx_urb_pending_count = 0;
+
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	list_for_each_entry_safe(rx_urb, tmp, &local, list) {
+		list_del_init(&rx_urb->list);
+		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+		/*
+		 * If out of memory or temporary error, put it back on the
+		 * queue and try again. Otherwise the device is dead/gone
+		 * and we should drop it.
+		 */
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+		case -EAGAIN:
+			rtl8xxxu_queue_rx_urb(priv, rx_urb);
+			break;
+		default:
+			pr_info("failed to requeue urb %i\n", ret);
+			skb = (struct sk_buff *)rx_urb->urb.context;
+			dev_kfree_skb(skb);
+			usb_free_urb(&rx_urb->urb);
+		}
+	}
+}
+
+static void rtl8xxxu_rx_complete(struct urb *urb)
+{
+	struct rtl8xxxu_rx_urb *rx_urb =
+		container_of(urb, struct rtl8xxxu_rx_urb, urb);
+	struct ieee80211_hw *hw = rx_urb->hw;
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+	struct rtl8723au_phy_stats *phy_stats;
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+	struct ieee80211_mgmt *mgmt;
+	struct device *dev = &priv->udev->dev;
+	__le32 *_rx_desc_le = (__le32 *)skb->data;
+	u32 *_rx_desc = (u32 *)skb->data;
+	int cnt, len, drvinfo_sz, desc_shift, i;
+
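+	/*
+	 * Byte-swap the RX descriptor in place so its bitfields can be
+	 * read in host byte order.
+	 */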
+	for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
+		_rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+	cnt = rx_desc->frag;
+	len = rx_desc->pktlen;
+	drvinfo_sz = rx_desc->drvinfo_sz * 8;
+	desc_shift = rx_desc->shift;
+	skb_put(skb, urb->actual_length);
+
+	if (urb->status == 0) {
+		skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+		phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+
+		skb_pull(skb, drvinfo_sz + desc_shift);
+
+		mgmt = (struct ieee80211_mgmt *)skb->data;
+
+		memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
+
+		if (rx_desc->phy_stats)
+			rtl8xxxu_rx_parse_phystats(priv, rx_status,
+						   rx_desc, phy_stats);
+
+		rx_status->freq = hw->conf.chandef.chan->center_freq;
+		rx_status->band = hw->conf.chandef.chan->band;
+
+		rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+		rx_status->flag |= RX_FLAG_MACTIME_START;
+
+		if (!rx_desc->swdec)
+			rx_status->flag |= RX_FLAG_DECRYPTED;
+		if (rx_desc->crc32)
+			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+		if (rx_desc->bw)
+			rx_status->flag |= RX_FLAG_40MHZ;
+
+		if (rx_desc->rxht) {
+			rx_status->flag |= RX_FLAG_HT;
+			rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+		} else {
+			rx_status->rate_idx = rx_desc->rxmcs;
+		}
+
+		ieee80211_rx_irqsafe(hw, skb);
+		skb = NULL;
+		rx_urb->urb.context = NULL;
+		rtl8xxxu_queue_rx_urb(priv, rx_urb);
+	} else {
+		dev_dbg(dev, "%s: status %i\n",	__func__, urb->status);
+		goto cleanup;
+	}
+	return;
+
+cleanup:
+	usb_free_urb(urb);
+	dev_kfree_skb(skb);
+	return;
+}
+
+static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
+				  struct rtl8xxxu_rx_urb *rx_urb)
+{
+	struct sk_buff *skb;
+	int skb_size;
+	int ret;
+
+	skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+	skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+	usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
+			  skb_size, rtl8xxxu_rx_complete, skb);
+	usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
+	ret = usb_submit_urb(&rx_urb->urb, GFP_ATOMIC);
+	if (ret)
+		usb_unanchor_urb(&rx_urb->urb);
+	return ret;
+}
+
+static void rtl8xxxu_int_complete(struct urb *urb)
+{
+	struct rtl8xxxu_priv *priv = (struct rtl8xxxu_priv *)urb->context;
+	struct device *dev = &priv->udev->dev;
+	int ret;
+
+	dev_dbg(dev, "%s: status %i\n", __func__, urb->status);
+	if (urb->status == 0) {
+		usb_anchor_urb(urb, &priv->int_anchor);
+		ret = usb_submit_urb(urb, GFP_ATOMIC);
+		if (ret)
+			usb_unanchor_urb(urb);
+	} else {
+		dev_info(dev, "%s: Error %i\n", __func__, urb->status);
+	}
+}
+
+static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct urb *urb;
+	u32 val32;
+	int ret;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return -ENOMEM;
+
+	usb_fill_int_urb(urb, priv->udev, priv->pipe_interrupt,
+			 priv->int_buf, USB_INTR_CONTENT_LENGTH,
+			 rtl8xxxu_int_complete, priv, 1);
+	usb_anchor_urb(urb, &priv->int_anchor);
+	ret = usb_submit_urb(urb, GFP_KERNEL);
+	if (ret) {
+		usb_unanchor_urb(urb);
+		goto error;
+	}
+
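+	/* With the interrupt URB in flight, enable the CPWM interrupt */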
+	val32 = rtl8xxxu_read32(priv, REG_USB_HIMR);
+	val32 |= USB_HIMR_CPWM;
+	rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
+
+error:
+	return ret;
+}
+
+static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	int ret;
+	u8 val8;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		rtl8723a_stop_tx_beacon(priv);
+
+		val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
+		val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
+			BEACON_DISABLE_TSF_UPDATE;
+		rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	rtl8xxxu_set_linktype(priv, vif->type);
+
+	return ret;
+}
+
+static void rtl8xxxu_remove_interface(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	dev_dbg(&priv->udev->dev, "%s\n", __func__);
+}
+
+static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u16 val16;
+	int ret = 0, channel;
+	bool ht40;
+
+	if (rtl8xxxu_debug & RTL8XXXU_DEBUG_CHANNEL)
+		dev_info(dev,
+			 "%s: channel: %i (changed %08x chandef.width %02x)\n",
+			 __func__, hw->conf.chandef.chan->hw_value,
+			 changed, hw->conf.chandef.width);
+
+	if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+		val16 = ((hw->conf.long_frame_max_tx_count <<
+			  RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
+			((hw->conf.short_frame_max_tx_count <<
+			  RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
+		rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		switch (hw->conf.chandef.width) {
+		case NL80211_CHAN_WIDTH_20_NOHT:
+		case NL80211_CHAN_WIDTH_20:
+			ht40 = false;
+			break;
+		case NL80211_CHAN_WIDTH_40:
+			ht40 = true;
+			break;
+		default:
+			ret = -ENOTSUPP;
+			goto exit;
+		}
+
+		channel = hw->conf.chandef.chan->hw_value;
+
+		rtl8723a_set_tx_power(priv, channel, ht40);
+
+		rtl8723au_config_channel(hw);
+	}
+
+exit:
+	return ret;
+}
+
+static int rtl8xxxu_conf_tx(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif, u16 queue,
+			    const struct ieee80211_tx_queue_params *param)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u32 val32;
+	u8 aifs, acm_ctrl, acm_bit;
+
+	aifs = param->aifs;
+
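+	/*
+	 * The EDCA parameter register packs AIFS, the ECWmin/ECWmax
+	 * exponents (hence fls()) and TXOP into a single 32-bit word.
+	 */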
+	val32 = aifs |
+		fls(param->cw_min) << EDCA_PARAM_ECW_MIN_SHIFT |
+		fls(param->cw_max) << EDCA_PARAM_ECW_MAX_SHIFT |
+		(u32)param->txop << EDCA_PARAM_TXOP_SHIFT;
+
+	acm_ctrl = rtl8xxxu_read8(priv, REG_ACM_HW_CTRL);
+	dev_dbg(dev,
+		"%s: IEEE80211 queue %02x val %08x, acm %i, acm_ctrl %02x\n",
+		__func__, queue, val32, param->acm, acm_ctrl);
+
+	switch (queue) {
+	case IEEE80211_AC_VO:
+		acm_bit = ACM_HW_CTRL_VO;
+		rtl8xxxu_write32(priv, REG_EDCA_VO_PARAM, val32);
+		break;
+	case IEEE80211_AC_VI:
+		acm_bit = ACM_HW_CTRL_VI;
+		rtl8xxxu_write32(priv, REG_EDCA_VI_PARAM, val32);
+		break;
+	case IEEE80211_AC_BE:
+		acm_bit = ACM_HW_CTRL_BE;
+		rtl8xxxu_write32(priv, REG_EDCA_BE_PARAM, val32);
+		break;
+	case IEEE80211_AC_BK:
+		acm_bit = ACM_HW_CTRL_BK;
+		rtl8xxxu_write32(priv, REG_EDCA_BK_PARAM, val32);
+		break;
+	default:
+		acm_bit = 0;
+		break;
+	}
+
+	if (param->acm)
+		acm_ctrl |= acm_bit;
+	else
+		acm_ctrl &= ~acm_bit;
+	rtl8xxxu_write8(priv, REG_ACM_HW_CTRL, acm_ctrl);
+
+	return 0;
+}
+
+static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
+				      unsigned int changed_flags,
+				      unsigned int *total_flags, u64 multicast)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+
+	dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
+		__func__, changed_flags, *total_flags);
+
+	*total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC);
+}
+
+static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts)
+{
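+	/* RTS/CTS is requested per frame in rtl8xxxu_tx(); just validate */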
+	if (rts > 2347)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta,
+			    struct ieee80211_key_conf *key)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u8 mac_addr[ETH_ALEN];
+	u8 val8;
+	u16 val16;
+	u32 val32;
+	int retval = -EOPNOTSUPP;
+
+	dev_dbg(dev, "%s: cmd %02x, cipher %08x, index %i\n",
+		__func__, cmd, key->cipher, key->keyidx);
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return -EOPNOTSUPP;
+
+	if (key->keyidx > 3)
+		return -EOPNOTSUPP;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/* Fall through - TKIP keys are rejected for now */
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+		dev_dbg(dev, "%s: pairwise key\n", __func__);
+		ether_addr_copy(mac_addr, sta->addr);
+	} else {
+		dev_dbg(dev, "%s: group key\n", __func__);
+		eth_broadcast_addr(mac_addr);
+	}
+
+	val16 = rtl8xxxu_read16(priv, REG_CR);
+	val16 |= CR_SECURITY_ENABLE;
+	rtl8xxxu_write16(priv, REG_CR, val16);
+
+	val8 = SEC_CFG_TX_SEC_ENABLE | SEC_CFG_TXBC_USE_DEFKEY |
+		SEC_CFG_RX_SEC_ENABLE | SEC_CFG_RXBC_USE_DEFKEY;
+	val8 |= SEC_CFG_TX_USE_DEFKEY | SEC_CFG_RX_USE_DEFKEY;
+	rtl8xxxu_write8(priv, REG_SECURITY_CFG, val8);
+
+	switch (cmd) {
+	case SET_KEY:
+		key->hw_key_idx = key->keyidx;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		rtl8xxxu_cam_write(priv, key, mac_addr);
+		retval = 0;
+		break;
+	case DISABLE_KEY:
+		rtl8xxxu_write32(priv, REG_CAM_WRITE, 0x00000000);
+		val32 = CAM_CMD_POLLING | CAM_CMD_WRITE |
+			key->keyidx << CAM_CMD_KEY_SHIFT;
+		rtl8xxxu_write32(priv, REG_CAM_CMD, val32);
+		retval = 0;
+		break;
+	default:
+		dev_warn(dev, "%s: Unsupported command %02x\n", __func__, cmd);
+	}
+
+	return retval;
+}
+
+static int
+rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      enum ieee80211_ampdu_mlme_action action,
+		      struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
+		      bool amsdu)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct device *dev = &priv->udev->dev;
+	u8 ampdu_factor, ampdu_density;
+
+	switch (action) {
+	case IEEE80211_AMPDU_TX_START:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+		ampdu_factor = sta->ht_cap.ampdu_factor;
+		ampdu_density = sta->ht_cap.ampdu_density;
+		rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
+		rtl8xxxu_set_ampdu_min_space(priv, ampdu_density);
+		dev_dbg(dev,
+			"Changed HT: ampdu_factor %02x, ampdu_density %02x\n",
+			ampdu_factor, ampdu_density);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+		rtl8xxxu_set_ampdu_factor(priv, 0);
+		rtl8xxxu_set_ampdu_min_space(priv, 0);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+			 __func__);
+		rtl8xxxu_set_ampdu_factor(priv, 0);
+		rtl8xxxu_set_ampdu_min_space(priv, 0);
+		break;
+	case IEEE80211_AMPDU_RX_START:
+		dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int rtl8xxxu_start(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	struct rtl8xxxu_rx_urb *rx_urb;
+	struct rtl8xxxu_tx_urb *tx_urb;
+	unsigned long flags;
+	int ret, i;
+
+	ret = 0;
+
+	init_usb_anchor(&priv->rx_anchor);
+	init_usb_anchor(&priv->tx_anchor);
+	init_usb_anchor(&priv->int_anchor);
+
+	rtl8723a_enable_rf(priv);
+	ret = rtl8xxxu_submit_int_urb(hw);
+	if (ret)
+		goto exit;
+
+	for (i = 0; i < RTL8XXXU_TX_URBS; i++) {
+		tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL);
+		if (!tx_urb) {
+			if (!i)
+				ret = -ENOMEM;
+
+			goto error_out;
+		}
+		usb_init_urb(&tx_urb->urb);
+		INIT_LIST_HEAD(&tx_urb->list);
+		tx_urb->hw = hw;
+		list_add(&tx_urb->list, &priv->tx_urb_free_list);
+		priv->tx_urb_free_count++;
+	}
+
+	priv->tx_stopped = false;
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+	priv->shutdown = false;
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	for (i = 0; i < RTL8XXXU_RX_URBS; i++) {
+		rx_urb = kmalloc(sizeof(struct rtl8xxxu_rx_urb), GFP_KERNEL);
+		if (!rx_urb) {
+			if (!i)
+				ret = -ENOMEM;
+
+			goto error_out;
+		}
+		usb_init_urb(&rx_urb->urb);
+		INIT_LIST_HEAD(&rx_urb->list);
+		rx_urb->hw = hw;
+
+		ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+	}
+exit:
+	/*
+	 * Disable all data frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+	/*
+	 * Accept all mgmt frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
+
+	rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+
+	return ret;
+
+error_out:
+	rtl8xxxu_free_tx_resources(priv);
+	/*
+	 * Disable all data and mgmt frames
+	 */
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+
+	return ret;
+}
+
+static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+{
+	struct rtl8xxxu_priv *priv = hw->priv;
+	unsigned long flags;
+
+	rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
+	rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0x0000);
+	rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000);
+
+	spin_lock_irqsave(&priv->rx_urb_lock, flags);
+	priv->shutdown = true;
+	spin_unlock_irqrestore(&priv->rx_urb_lock, flags);
+
+	usb_kill_anchored_urbs(&priv->rx_anchor);
+	usb_kill_anchored_urbs(&priv->tx_anchor);
+	usb_kill_anchored_urbs(&priv->int_anchor);
+
+	rtl8723a_disable_rf(priv);
+
+	/*
+	 * Disable interrupts
+	 */
+	rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+
+	rtl8xxxu_free_rx_resources(priv);
+	rtl8xxxu_free_tx_resources(priv);
+}
+
+static const struct ieee80211_ops rtl8xxxu_ops = {
+	.tx = rtl8xxxu_tx,
+	.add_interface = rtl8xxxu_add_interface,
+	.remove_interface = rtl8xxxu_remove_interface,
+	.config = rtl8xxxu_config,
+	.conf_tx = rtl8xxxu_conf_tx,
+	.bss_info_changed = rtl8xxxu_bss_info_changed,
+	.configure_filter = rtl8xxxu_configure_filter,
+	.set_rts_threshold = rtl8xxxu_set_rts_threshold,
+	.start = rtl8xxxu_start,
+	.stop = rtl8xxxu_stop,
+	.sw_scan_start = rtl8xxxu_sw_scan_start,
+	.sw_scan_complete = rtl8xxxu_sw_scan_complete,
+	.set_key = rtl8xxxu_set_key,
+	.ampdu_action = rtl8xxxu_ampdu_action,
+};
+
+static int rtl8xxxu_parse_usb(struct rtl8xxxu_priv *priv,
+			      struct usb_interface *interface)
+{
+	struct usb_interface_descriptor *interface_desc;
+	struct usb_host_interface *host_interface;
+	struct usb_endpoint_descriptor *endpoint;
+	struct device *dev = &priv->udev->dev;
+	int i, j = 0, endpoints;
+	u8 dir, xtype, num;
+	int ret = 0;
+
+	host_interface = &interface->altsetting[0];
+	interface_desc = &host_interface->desc;
+	endpoints = interface_desc->bNumEndpoints;
+
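+	/*
+	 * Classify the endpoints: one bulk IN pipe for RX, an optional
+	 * interrupt IN pipe, and up to RTL8XXXU_OUT_ENDPOINTS bulk OUT
+	 * pipes for TX.
+	 */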
+	for (i = 0; i < endpoints; i++) {
+		endpoint = &host_interface->endpoint[i].desc;
+
+		dir = endpoint->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
+		num = usb_endpoint_num(endpoint);
+		xtype = usb_endpoint_type(endpoint);
+		if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+			dev_dbg(dev,
+				"%s: endpoint: dir %02x, # %02x, type %02x\n",
+				__func__, dir, num, xtype);
+		if (usb_endpoint_dir_in(endpoint) &&
+		    usb_endpoint_xfer_bulk(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: in endpoint num %i\n",
+					__func__, num);
+
+			if (priv->pipe_in) {
+				dev_warn(dev,
+					 "%s: Too many IN pipes\n", __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			priv->pipe_in = usb_rcvbulkpipe(priv->udev, num);
+		}
+
+		if (usb_endpoint_dir_in(endpoint) &&
+		    usb_endpoint_xfer_int(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: interrupt endpoint num %i\n",
+					__func__, num);
+
+			if (priv->pipe_interrupt) {
+				dev_warn(dev, "%s: Too many INTERRUPT pipes\n",
+					 __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			priv->pipe_interrupt = usb_rcvintpipe(priv->udev, num);
+		}
+
+		if (usb_endpoint_dir_out(endpoint) &&
+		    usb_endpoint_xfer_bulk(endpoint)) {
+			if (rtl8xxxu_debug & RTL8XXXU_DEBUG_USB)
+				dev_dbg(dev, "%s: out endpoint num %i\n",
+					__func__, num);
+			if (j >= RTL8XXXU_OUT_ENDPOINTS) {
+				dev_warn(dev,
+					 "%s: Too many OUT pipes\n", __func__);
+				ret = -EINVAL;
+				goto exit;
+			}
+			priv->out_ep[j++] = num;
+		}
+	}
+exit:
+	priv->nr_out_eps = j;
+	return ret;
+}
+
+static int rtl8xxxu_probe(struct usb_interface *interface,
+			  const struct usb_device_id *id)
+{
+	struct rtl8xxxu_priv *priv;
+	struct ieee80211_hw *hw;
+	struct usb_device *udev;
+	struct ieee80211_supported_band *sband;
+	int ret = 0;
+	int untested = 1;
+
+	udev = usb_get_dev(interface_to_usbdev(interface));
+
+	switch (id->idVendor) {
+	case USB_VENDOR_ID_REALTEK:
+		switch (id->idProduct) {
+		case 0x1724:
+		case 0x8176:
+		case 0x8178:
+		case 0x817f:
+			untested = 0;
+			break;
+		}
+		break;
+	case 0x7392:
+		if (id->idProduct == 0x7811)
+			untested = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (untested) {
+		rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+		dev_info(&udev->dev,
+			 "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n",
+			 id->idVendor, id->idProduct);
+		dev_info(&udev->dev,
+			 "Please report results to Jes.Sorensen@gmail.com\n");
+	}
+
+	hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
+	if (!hw) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	priv = hw->priv;
+	priv->hw = hw;
+	priv->udev = udev;
+	priv->fops = (struct rtl8xxxu_fileops *)id->driver_info;
+	mutex_init(&priv->usb_buf_mutex);
+	mutex_init(&priv->h2c_mutex);
+	INIT_LIST_HEAD(&priv->tx_urb_free_list);
+	spin_lock_init(&priv->tx_urb_lock);
+	INIT_LIST_HEAD(&priv->rx_urb_pending_list);
+	spin_lock_init(&priv->rx_urb_lock);
+	INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
+
+	usb_set_intfdata(interface, hw);
+
+	ret = rtl8xxxu_parse_usb(priv, interface);
+	if (ret)
+		goto exit;
+
+	ret = rtl8xxxu_identify_chip(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to identify chip\n");
+		goto exit;
+	}
+
+	ret = rtl8xxxu_read_efuse(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
+		goto exit;
+	}
+
+	ret = priv->fops->parse_efuse(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to parse EFuse\n");
+		goto exit;
+	}
+
+	rtl8xxxu_print_chipinfo(priv);
+
+	ret = priv->fops->load_firmware(priv);
+	if (ret) {
+		dev_err(&udev->dev, "Fatal - failed to load firmware\n");
+		goto exit;
+	}
+
+	ret = rtl8xxxu_init_device(hw);
+
+	hw->wiphy->max_scan_ssids = 1;
+	hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+	hw->queues = 4;
+
+	sband = &rtl8xxxu_supported_band;
+	sband->ht_cap.ht_supported = true;
+	sband->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+	sband->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+	sband->ht_cap.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40;
+	memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs));
+	sband->ht_cap.mcs.rx_mask[0] = 0xff;
+	sband->ht_cap.mcs.rx_mask[4] = 0x01;
+	if (priv->rf_paths > 1) {
+		sband->ht_cap.mcs.rx_mask[1] = 0xff;
+		sband->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+	}
+	sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	/*
+	 * Some APs will negotiate HT20_40 in a noisy environment leading
+	 * to miserable performance. Rather than defaulting to this, only
+	 * enable it if explicitly requested at module load time.
+	 */
+	if (rtl8xxxu_ht40_2g) {
+		dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
+		sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+	}
+	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+
+	hw->wiphy->rts_threshold = 2347;
+
+	SET_IEEE80211_DEV(priv->hw, &interface->dev);
+	SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr);
+
+	hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	/*
+	 * The firmware handles rate control
+	 */
+	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		dev_err(&udev->dev, "%s: Failed to register: %i\n",
+			__func__, ret);
+		goto exit;
+	}
+
+exit:
+	if (ret < 0)
+		usb_put_dev(udev);
+	return ret;
+}
+
+static void rtl8xxxu_disconnect(struct usb_interface *interface)
+{
+	struct rtl8xxxu_priv *priv;
+	struct ieee80211_hw *hw;
+
+	hw = usb_get_intfdata(interface);
+	priv = hw->priv;
+
+	rtl8xxxu_disable_device(hw);
+	usb_set_intfdata(interface, NULL);
+
+	dev_info(&priv->udev->dev, "disconnecting\n");
+
+	ieee80211_unregister_hw(hw);
+
+	kfree(priv->fw_data);
+	mutex_destroy(&priv->usb_buf_mutex);
+	mutex_destroy(&priv->h2c_mutex);
+
+	usb_put_dev(priv->udev);
+	ieee80211_free_hw(hw);
+}
+
+static struct rtl8xxxu_fileops rtl8723au_fops = {
+	.parse_efuse = rtl8723au_parse_efuse,
+	.load_firmware = rtl8723au_load_firmware,
+	.power_on = rtl8723au_power_on,
+	.writeN_block_size = 1024,
+};
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+
+static struct rtl8xxxu_fileops rtl8192cu_fops = {
+	.parse_efuse = rtl8192cu_parse_efuse,
+	.load_firmware = rtl8192cu_load_firmware,
+	.power_on = rtl8192cu_power_on,
+	.writeN_block_size = 128,
+};
+
+#endif
+
+static struct usb_device_id dev_table[] = {
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8723au_fops},
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+/* Still supported by rtlwifi */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8178, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Larry Finger */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8188 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8170, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8177, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x817e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x1058, 0x0631, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x094c, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1102, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe033, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8189, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9041, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ba, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1e1e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x5088, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0052, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x005c, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0eb0, 0x9071, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x103c, 0x1629, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffa, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff8, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffb, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaffc, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x1201, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Currently untested 8192 series devices */
+{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0586, 0x341f, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x06f8, 0xe035, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x17ab, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0061, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0070, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0789, 0x016d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07aa, 0x0056, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8178, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9021, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0xf001, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x2e2e, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0019, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0020, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3307, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3309, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330a, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab2b, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x624d, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0100, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0091, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
+	.driver_info = (unsigned long)&rtl8192cu_fops},
+#endif
+{ }
+};
+
+static struct usb_driver rtl8xxxu_driver = {
+	.name = DRIVER_NAME,
+	.probe = rtl8xxxu_probe,
+	.disconnect = rtl8xxxu_disconnect,
+	.id_table = dev_table,
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int __init rtl8xxxu_module_init(void)
+{
+	int res;
+
+	res = usb_register(&rtl8xxxu_driver);
+	if (res < 0)
+		pr_err(DRIVER_NAME ": usb_register() failed (%i)\n", res);
+
+	return res;
+}
+
+static void __exit rtl8xxxu_module_exit(void)
+{
+	usb_deregister(&rtl8xxxu_driver);
+}
+
+MODULE_DEVICE_TABLE(usb, dev_table);
+
+module_init(rtl8xxxu_module_init);
+module_exit(rtl8xxxu_module_exit);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
new file mode 100644
index 0000000..f2a1bac
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+#include <asm/byteorder.h>
+
+#define RTL8XXXU_DEBUG_REG_WRITE	0x01
+#define RTL8XXXU_DEBUG_REG_READ		0x02
+#define RTL8XXXU_DEBUG_RFREG_WRITE	0x04
+#define RTL8XXXU_DEBUG_RFREG_READ	0x08
+#define RTL8XXXU_DEBUG_CHANNEL		0x10
+#define RTL8XXXU_DEBUG_TX		0x20
+#define RTL8XXXU_DEBUG_TX_DUMP		0x40
+#define RTL8XXXU_DEBUG_RX		0x80
+#define RTL8XXXU_DEBUG_RX_DUMP		0x100
+#define RTL8XXXU_DEBUG_USB		0x200
+#define RTL8XXXU_DEBUG_KEY		0x400
+#define RTL8XXXU_DEBUG_H2C		0x800
+#define RTL8XXXU_DEBUG_ACTION		0x1000
+#define RTL8XXXU_DEBUG_EFUSE		0x2000
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT	500
+#define RTL8XXXU_MAX_REG_POLL		500
+#define USB_INTR_CONTENT_LENGTH		56
+
+#define RTL8XXXU_OUT_ENDPOINTS		3
+
+#define REALTEK_USB_READ		0xc0
+#define REALTEK_USB_WRITE		0x40
+#define REALTEK_USB_CMD_REQ		0x05
+#define REALTEK_USB_CMD_IDX		0x00
+
+#define TX_TOTAL_PAGE_NUM		0xf8
+/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
+#define TX_PAGE_NUM_PUBQ		0xe7
+#define TX_PAGE_NUM_HI_PQ		0x0c
+#define TX_PAGE_NUM_LO_PQ		0x02
+#define TX_PAGE_NUM_NORM_PQ		0x02
+
+#define RTL_FW_PAGE_SIZE		4096
+#define RTL8XXXU_FIRMWARE_POLL_MAX	1000
+
+#define RTL8723A_CHANNEL_GROUPS		3
+#define RTL8723A_MAX_RF_PATHS		2
+#define RF6052_MAX_TX_PWR		0x3f
+
+#define EFUSE_MAP_LEN_8723A		256
+#define EFUSE_MAX_SECTION_8723A		32
+#define EFUSE_REAL_CONTENT_LEN_8723A	512
+#define EFUSE_BT_MAP_LEN_8723A		1024
+#define EFUSE_MAX_WORD_UNIT		4
+
+struct rtl8xxxu_rx_desc {
+#ifdef __LITTLE_ENDIAN
+	u32 pktlen:14;
+	u32 crc32:1;
+	u32 icverr:1;
+	u32 drvinfo_sz:4;
+	u32 security:3;
+	u32 qos:1;
+	u32 shift:2;
+	u32 phy_stats:1;
+	u32 swdec:1;
+	u32 ls:1;
+	u32 fs:1;
+	u32 eor:1;
+	u32 own:1;
+
+	u32 macid:5;
+	u32 tid:4;
+	u32 hwrsvd:4;
+	u32 amsdu:1;
+	u32 paggr:1;
+	u32 faggr:1;
+	u32 a1fit:4;
+	u32 a2fit:4;
+	u32 pam:1;
+	u32 pwr:1;
+	u32 md:1;
+	u32 mf:1;
+	u32 type:2;
+	u32 mc:1;
+	u32 bc:1;
+
+	u32 seq:12;
+	u32 frag:4;
+	u32 nextpktlen:14;
+	u32 nextind:1;
+	u32 reserved0:1;
+
+	u32 rxmcs:6;
+	u32 rxht:1;
+	u32 gf:1;
+	u32 splcp:1;
+	u32 bw:1;
+	u32 htc:1;
+	u32 eosp:1;
+	u32 bssidfit:2;
+	u32 reserved1:16;
+	u32 unicastwake:1;
+	u32 magicwake:1;
+
+	u32 pattern0match:1;
+	u32 pattern1match:1;
+	u32 pattern2match:1;
+	u32 pattern3match:1;
+	u32 pattern4match:1;
+	u32 pattern5match:1;
+	u32 pattern6match:1;
+	u32 pattern7match:1;
+	u32 pattern8match:1;
+	u32 pattern9match:1;
+	u32 patternamatch:1;
+	u32 patternbmatch:1;
+	u32 patterncmatch:1;
+	u32 reserved2:19;
+#else
+	u32 own:1;
+	u32 eor:1;
+	u32 fs:1;
+	u32 ls:1;
+	u32 swdec:1;
+	u32 phy_stats:1;
+	u32 shift:2;
+	u32 qos:1;
+	u32 security:3;
+	u32 drvinfo_sz:4;
+	u32 icverr:1;
+	u32 crc32:1;
+	u32 pktlen:14;
+
+	u32 bc:1;
+	u32 mc:1;
+	u32 type:2;
+	u32 mf:1;
+	u32 md:1;
+	u32 pwr:1;
+	u32 pam:1;
+	u32 a2fit:4;
+	u32 a1fit:4;
+	u32 faggr:1;
+	u32 paggr:1;
+	u32 amsdu:1;
+	u32 hwrsvd:4;
+	u32 tid:4;
+	u32 macid:5;
+
+	u32 reserved0:1;
+	u32 nextind:1;
+	u32 nextpktlen:14;
+	u32 frag:4;
+	u32 seq:12;
+
+	u32 magicwake:1;
+	u32 unicastwake:1;
+	u32 reserved1:16;
+	u32 bssidfit:2;
+	u32 eosp:1;
+	u32 htc:1;
+	u32 bw:1;
+	u32 splcp:1;
+	u32 gf:1;
+	u32 rxht:1;
+	u32 rxmcs:6;
+
+	u32 reserved2:19;
+	u32 patterncmatch:1;
+	u32 patternbmatch:1;
+	u32 patternamatch:1;
+	u32 pattern9match:1;
+	u32 pattern8match:1;
+	u32 pattern7match:1;
+	u32 pattern6match:1;
+	u32 pattern5match:1;
+	u32 pattern4match:1;
+	u32 pattern3match:1;
+	u32 pattern2match:1;
+	u32 pattern1match:1;
+	u32 pattern0match:1;
+#endif
+	__le32 tsfl;
+#if 0
+	u32 bassn:12;
+	u32 bavld:1;
+	u32 reserved3:19;
+#endif
+};
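+
+/*
+ * Illustrative sketch, not part of the original patch: the RX payload
+ * follows the descriptor, the driver-info block and the alignment
+ * shift.  The 8-byte drvinfo unit size is an assumption typical of
+ * Realtek RX descriptors, not something this header defines.
+ */
+#if 0	/* example only */
+	offset = sizeof(struct rtl8xxxu_rx_desc) +
+		 (rx_desc->drvinfo_sz * 8) + rx_desc->shift;
+#endif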
+
+struct rtl8xxxu_tx_desc {
+	__le16 pkt_size;
+	u8 pkt_offset;
+	u8 txdw0;
+	__le32 txdw1;
+	__le32 txdw2;
+	__le32 txdw3;
+	__le32 txdw4;
+	__le32 txdw5;
+	__le32 txdw6;
+	__le16 csum;
+	__le16 txdw7;
+};
+
+/*  CCK Rates, TxHT = 0 */
+#define DESC_RATE_1M			0x00
+#define DESC_RATE_2M			0x01
+#define DESC_RATE_5_5M			0x02
+#define DESC_RATE_11M			0x03
+
+/*  OFDM Rates, TxHT = 0 */
+#define DESC_RATE_6M			0x04
+#define DESC_RATE_9M			0x05
+#define DESC_RATE_12M			0x06
+#define DESC_RATE_18M			0x07
+#define DESC_RATE_24M			0x08
+#define DESC_RATE_36M			0x09
+#define DESC_RATE_48M			0x0a
+#define DESC_RATE_54M			0x0b
+
+/*  MCS Rates, TxHT = 1 */
+#define DESC_RATE_MCS0			0x0c
+#define DESC_RATE_MCS1			0x0d
+#define DESC_RATE_MCS2			0x0e
+#define DESC_RATE_MCS3			0x0f
+#define DESC_RATE_MCS4			0x10
+#define DESC_RATE_MCS5			0x11
+#define DESC_RATE_MCS6			0x12
+#define DESC_RATE_MCS7			0x13
+#define DESC_RATE_MCS8			0x14
+#define DESC_RATE_MCS9			0x15
+#define DESC_RATE_MCS10			0x16
+#define DESC_RATE_MCS11			0x17
+#define DESC_RATE_MCS12			0x18
+#define DESC_RATE_MCS13			0x19
+#define DESC_RATE_MCS14			0x1a
+#define DESC_RATE_MCS15			0x1b
+#define DESC_RATE_MCS15_SG		0x1c
+#define DESC_RATE_MCS32			0x20
+
+#define TXDESC_OFFSET_SZ		0
+#define TXDESC_OFFSET_SHT		16
+#if 0
+#define TXDESC_BMC			BIT(24)
+#define TXDESC_LSG			BIT(26)
+#define TXDESC_FSG			BIT(27)
+#define TXDESC_OWN			BIT(31)
+#else
+#define TXDESC_BROADMULTICAST		BIT(0)
+#define TXDESC_LAST_SEGMENT		BIT(2)
+#define TXDESC_FIRST_SEGMENT		BIT(3)
+#define TXDESC_OWN			BIT(7)
+#endif
+
+/* Word 1 */
+#define TXDESC_PKT_OFFSET_SZ		0
+#define TXDESC_AGG_ENABLE		BIT(5)
+#define TXDESC_BK			BIT(6)
+#define TXDESC_QUEUE_SHIFT		8
+#define TXDESC_QUEUE_MASK		0x1f00
+#define TXDESC_QUEUE_BK			0x2
+#define TXDESC_QUEUE_BE			0x0
+#define TXDESC_QUEUE_VI			0x5
+#define TXDESC_QUEUE_VO			0x7
+#define TXDESC_QUEUE_BEACON		0x10
+#define TXDESC_QUEUE_HIGH		0x11
+#define TXDESC_QUEUE_MGNT		0x12
+#define TXDESC_QUEUE_CMD		0x13
+#define TXDESC_QUEUE_MAX		(TXDESC_QUEUE_CMD + 1)
+
+#define DESC_RATE_ID_SHIFT		16
+#define DESC_RATE_ID_MASK		0xf
+#define TXDESC_NAVUSEHDR		BIT(20)
+#define TXDESC_SEC_RC4			0x00400000
+#define TXDESC_SEC_AES			0x00c00000
+#define TXDESC_PKT_OFFSET_SHIFT		26
+#define TXDESC_AGG_EN			BIT(29)
+#define TXDESC_HWPC			BIT(31)
+
+/* Word 2 */
+#define TXDESC_ACK_REPORT		BIT(19)
+#define TXDESC_AMPDU_DENSITY_SHIFT	20
+
+/* Word 3 */
+#define TXDESC_SEQ_SHIFT		16
+#define TXDESC_SEQ_MASK			0x0fff0000
+
+/* Word 4 */
+#define TXDESC_QOS			BIT(6)
+#define TXDESC_HW_SEQ_ENABLE		BIT(7)
+#define TXDESC_USE_DRIVER_RATE		BIT(8)
+#define TXDESC_DISABLE_DATA_FB		BIT(10)
+#define TXDESC_CTS_SELF_ENABLE		BIT(11)
+#define TXDESC_RTS_CTS_ENABLE		BIT(12)
+#define TXDESC_HW_RTS_ENABLE		BIT(13)
+#define TXDESC_PRIME_CH_OFF_LOWER	BIT(20)
+#define TXDESC_PRIME_CH_OFF_UPPER	BIT(21)
+#define TXDESC_SHORT_PREAMBLE		BIT(24)
+#define TXDESC_DATA_BW			BIT(25)
+#define TXDESC_RTS_DATA_BW		BIT(27)
+#define TXDESC_RTS_PRIME_CH_OFF_LOWER	BIT(28)
+#define TXDESC_RTS_PRIME_CH_OFF_UPPER	BIT(29)
+
+/* Word 5 */
+#define TXDESC_RTS_RATE_SHIFT		0
+#define TXDESC_RTS_RATE_MASK		0x3f
+#define TXDESC_SHORT_GI			BIT(6)
+#define TXDESC_CCX_TAG			BIT(7)
+#define TXDESC_RETRY_LIMIT_ENABLE	BIT(17)
+#define TXDESC_RETRY_LIMIT_SHIFT	18
+#define TXDESC_RETRY_LIMIT_MASK		0x00fc0000
+
+/* Word 6 */
+#define TXDESC_MAX_AGG_SHIFT		11
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+	u8	gain:7, trsw:1;
+#else
+	u8	trsw:1, gain:7;
+#endif
+};
+
+struct rtl8723au_phy_stats {
+	struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
+	u8	ch_corr[RTL8723A_MAX_RF_PATHS];
+	u8	cck_sig_qual_ofdm_pwdb_all;
+	u8	cck_agc_rpt_ofdm_cfosho_a;
+	u8	cck_rpt_b_ofdm_cfosho_b;
+	u8	reserved_1;
+	u8	noise_power_db_msb;
+	u8	path_cfotail[RTL8723A_MAX_RF_PATHS];
+	u8	pcts_mask[RTL8723A_MAX_RF_PATHS];
+	s8	stream_rxevm[RTL8723A_MAX_RF_PATHS];
+	u8	path_rxsnr[RTL8723A_MAX_RF_PATHS];
+	u8	noise_power_db_lsb;
+	u8	reserved_2[3];
+	u8	stream_csi[RTL8723A_MAX_RF_PATHS];
+	u8	stream_target_csi[RTL8723A_MAX_RF_PATHS];
+	s8	sig_evm;
+	u8	reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+	u8	antsel_rx_keep_2:1;	/* ex_intf_flg:1; */
+	u8	sgi_en:1;
+	u8	rxsc:2;
+	u8	idle_long:1;
+	u8	r_ant_train_en:1;
+	u8	antenna_select_b:1;
+	u8	antenna_select:1;
+#else	/*  _BIG_ENDIAN_ */
+	u8	antenna_select:1;
+	u8	antenna_select_b:1;
+	u8	r_ant_train_en:1;
+	u8	idle_long:1;
+	u8	rxsc:2;
+	u8	sgi_en:1;
+	u8	antsel_rx_keep_2:1;	/* ex_intf_flg:1; */
+#endif
+};
+
+/*
+ * Regs to backup
+ */
+#define RTL8XXXU_ADDA_REGS		16
+#define RTL8XXXU_MAC_REGS		4
+#define RTL8XXXU_BB_REGS		9
+
+struct rtl8xxxu_firmware_header {
+	__le16	signature;		/*  92C0: test chip; 92C,
+					    88C0: test chip;
+					    88C1: MP A-cut;
+					    92C1: MP A-cut */
+	u8	category;		/*  AP/NIC and USB/PCI */
+	u8	function;
+
+	__le16	major_version;		/*  FW Version */
+	u8	minor_version;		/*  FW Subversion, default 0x00 */
+	u8	reserved1;
+
+	u8	month;			/*  Release time Month field */
+	u8	date;			/*  Release time Date field */
+	u8	hour;			/*  Release time Hour field */
+	u8	minute;			/*  Release time Minute field */
+
+	__le16	ramcodesize;		/*  Size of RAM code */
+	u16	reserved2;
+
+	__le32	svn_idx;		/*  SVN entry index */
+	u32	reserved3;
+
+	u32	reserved4;
+	u32	reserved5;
+
+	u8	data[0];
+};
+
+/*
+ * The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
+ */
+struct rtl8723au_idx {
+#ifdef __LITTLE_ENDIAN
+	int	a:4;
+	int	b:4;
+#else
+	int	b:4;
+	int	a:4;
+#endif
+} __attribute__((packed));
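+
+/*
+ * Illustrative sketch, not part of the original patch: with gcc, the
+ * plain-int 4-bit bitfields above are signed and sign-extend on read,
+ * so one raw efuse byte decodes to two values in -8..7.  The helper
+ * name is hypothetical.
+ */
+#if 0	/* example only */
+static inline void rtl8723au_example_decode_idx(u8 raw, int *path_a,
+						int *path_b)
+{
+	struct rtl8723au_idx idx;
+
+	memcpy(&idx, &raw, sizeof(idx));
+	*path_a = idx.a;
+	*path_b = idx.b;
+}
+#endif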
+
+struct rtl8723au_efuse {
+	__le16 rtl_id;
+	u8 res0[0xe];
+	u8 cck_tx_power_index_A[3];	/* 0x10 */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];	/* 0x16 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u8 channel_plan;		/* 0x28 */
+	u8 tssi_a;
+	u8 thermal_meter;
+	u8 rf_regulatory;
+	u8 rf_option_2;
+	u8 rf_option_3;
+	u8 rf_option_4;
+	u8 res7;
+	u8 version;			/* 0x30 */
+	u8 customer_id_major;
+	u8 customer_id_minor;
+	u8 xtal_k;
+	u8 chipset;			/* 0x34 */
+	u8 res8[0x82];
+	u8 vid;				/* 0xb7 */
+	u8 res9;
+	u8 pid;				/* 0xb9 */
+	u8 res10[0x0c];
+	u8 mac_addr[ETH_ALEN];		/* 0xc6 */
+	u8 res11[2];
+	u8 vendor_name[7];
+	u8 res12[2];
+	u8 device_name[0x29];		/* 0xd7 */
+};
+
+struct rtl8192cu_efuse {
+	__le16 rtl_id;
+	__le16 hpon;
+	u8 res0[2];
+	__le16 clk;
+	__le16 testr;
+	__le16 vid;
+	__le16 did;
+	__le16 svid;
+	__le16 smid;						/* 0x10 */
+	u8 res1[4];
+	u8 mac_addr[ETH_ALEN];					/* 0x16 */
+	u8 res2[2];
+	u8 vendor_name[7];
+	u8 res3[3];
+	u8 device_name[0x14];					/* 0x28 */
+	u8 res4[0x1e];						/* 0x3c */
+	u8 cck_tx_power_index_A[3];				/* 0x5a */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];				/* 0x60 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];	/* 0x69 */
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];		/* 0x6f */
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u8 channel_plan;					/* 0x75 */
+	u8 tssi_a;
+	u8 tssi_b;
+	u8 thermal_meter;	/* xtal_k */			/* 0x78 */
+	u8 rf_regulatory;
+	u8 rf_option_2;
+	u8 rf_option_3;
+	u8 rf_option_4;
+	u8 res5[1];						/* 0x7d */
+	u8 version;
+	u8 customer_id;
+};
+
+struct rtl8xxxu_reg8val {
+	u16 reg;
+	u8 val;
+};
+
+struct rtl8xxxu_reg32val {
+	u16 reg;
+	u32 val;
+};
+
+struct rtl8xxxu_rfregval {
+	u8 reg;
+	u32 val;
+};
+
+enum rtl8xxxu_rfpath {
+	RF_A = 0,
+	RF_B = 1,
+};
+
+struct rtl8xxxu_rfregs {
+	u16 hssiparm1;
+	u16 hssiparm2;
+	u16 lssiparm;
+	u16 hspiread;
+	u16 lssiread;
+	u16 rf_sw_ctrl;
+};
+
+#define H2C_MAX_MBOX			4
+#define H2C_EXT				BIT(7)
+#define H2C_SET_POWER_MODE		1
+#define H2C_JOIN_BSS_REPORT		2
+#define  H2C_JOIN_BSS_DISCONNECT	0
+#define  H2C_JOIN_BSS_CONNECT		1
+#define H2C_SET_RSSI			5
+#define H2C_SET_RATE_MASK		(6 | H2C_EXT)
+
+struct h2c_cmd {
+	union {
+		struct {
+			u8 cmd;
+			u8 data[5];
+		} __packed cmd;
+		struct {
+			__le32 data;
+			__le16 ext;
+		} __packed raw;
+		struct {
+			u8 cmd;
+			u8 data;
+			u8 pad[4];
+		} __packed joinbss;
+		struct {
+			u8 cmd;
+			__le16 mask_hi;
+			u8 arg;
+			__le16 mask_lo;
+		} __packed ramask;
+	};
+};
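+
+/*
+ * Illustrative sketch, not part of the original patch: filling the
+ * union for a join-BSS report.  The function name is hypothetical and
+ * the actual mailbox submission path is not shown.
+ */
+#if 0	/* example only */
+static void example_h2c_joinbss(struct h2c_cmd *h2c, bool connected)
+{
+	memset(h2c, 0, sizeof(*h2c));
+	h2c->joinbss.cmd = H2C_JOIN_BSS_REPORT;
+	h2c->joinbss.data = connected ?
+		H2C_JOIN_BSS_CONNECT : H2C_JOIN_BSS_DISCONNECT;
+}
+#endif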
+
+struct rtl8xxxu_fileops;
+
+struct rtl8xxxu_priv {
+	struct ieee80211_hw *hw;
+	struct usb_device *udev;
+	struct rtl8xxxu_fileops *fops;
+
+	spinlock_t tx_urb_lock;
+	struct list_head tx_urb_free_list;
+	int tx_urb_free_count;
+	bool tx_stopped;
+
+	spinlock_t rx_urb_lock;
+	struct list_head rx_urb_pending_list;
+	int rx_urb_pending_count;
+	bool shutdown;
+	struct work_struct rx_urb_wq;
+
+	u8 mac_addr[ETH_ALEN];
+	char chip_name[8];
+	u8 cck_tx_power_index_A[3];	/* 0x10 */
+	u8 cck_tx_power_index_B[3];
+	u8 ht40_1s_tx_power_index_A[3];	/* 0x16 */
+	u8 ht40_1s_tx_power_index_B[3];
+	/*
+	 * The following entries are half-bytes split as:
+	 * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed
+	 */
+	struct rtl8723au_idx ht40_2s_tx_power_index_diff[3];
+	struct rtl8723au_idx ht20_tx_power_index_diff[3];
+	struct rtl8723au_idx ofdm_tx_power_index_diff[3];
+	struct rtl8723au_idx ht40_max_power_offset[3];
+	struct rtl8723au_idx ht20_max_power_offset[3];
+	u32 chip_cut:4;
+	u32 rom_rev:4;
+	u32 has_wifi:1;
+	u32 has_bluetooth:1;
+	u32 enable_bluetooth:1;
+	u32 has_gps:1;
+	u32 hi_pa:1;
+	u32 vendor_umc:1;
+	u32 has_polarity_ctrl:1;
+	u32 has_eeprom:1;
+	u32 boot_eeprom:1;
+	u32 ep_tx_high_queue:1;
+	u32 ep_tx_normal_queue:1;
+	u32 ep_tx_low_queue:1;
+	u32 path_a_hi_power:1;
+	u32 path_a_rf_paths:4;
+	unsigned int pipe_interrupt;
+	unsigned int pipe_in;
+	unsigned int pipe_out[TXDESC_QUEUE_MAX];
+	u8 out_ep[RTL8XXXU_OUT_ENDPOINTS];
+	u8 path_a_ig_value;
+	u8 ep_tx_count;
+	u8 rf_paths;
+	u8 rx_paths;
+	u8 tx_paths;
+	u32 rf_mode_ag[2];
+	u32 rege94;
+	u32 rege9c;
+	u32 regeb4;
+	u32 regebc;
+	int next_mbox;
+	int nr_out_eps;
+
+	struct mutex h2c_mutex;
+
+	struct usb_anchor rx_anchor;
+	struct usb_anchor tx_anchor;
+	struct usb_anchor int_anchor;
+	struct rtl8xxxu_firmware_header *fw_data;
+	size_t fw_size;
+	struct mutex usb_buf_mutex;
+	union {
+		__le32 val32;
+		__le16 val16;
+		u8 val8;
+	} usb_buf;
+	union {
+		u8 raw[EFUSE_MAP_LEN_8723A];
+		struct rtl8723au_efuse efuse8723;
+		struct rtl8192cu_efuse efuse8192;
+	} efuse_wifi;
+	u32 adda_backup[RTL8XXXU_ADDA_REGS];
+	u32 mac_backup[RTL8XXXU_MAC_REGS];
+	u32 bb_backup[RTL8XXXU_BB_REGS];
+	u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
+	u32 rtlchip;
+	u8 pi_enabled:1;
+	u8 iqk_initialized:1;
+	u8 int_buf[USB_INTR_CONTENT_LENGTH];
+};
+
+struct rtl8xxxu_rx_urb {
+	struct urb urb;
+	struct ieee80211_hw *hw;
+	struct list_head list;
+};
+
+struct rtl8xxxu_tx_urb {
+	struct urb urb;
+	struct ieee80211_hw *hw;
+	struct list_head list;
+};
+
+struct rtl8xxxu_fileops {
+	int (*parse_efuse) (struct rtl8xxxu_priv *priv);
+	int (*load_firmware) (struct rtl8xxxu_priv *priv);
+	int (*power_on) (struct rtl8xxxu_priv *priv);
+	int writeN_block_size;
+};
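+
+/*
+ * Illustrative sketch, not part of the original patch: a per-chip ops
+ * table would be instantiated along these lines; every name on the
+ * right-hand side is hypothetical.
+ */
+#if 0	/* example only */
+static struct rtl8xxxu_fileops example_8723au_fops = {
+	.parse_efuse = rtl8723au_parse_efuse,
+	.load_firmware = rtl8723au_load_firmware,
+	.power_on = rtl8723au_power_on,
+	.writeN_block_size = 1024,
+};
+#endif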
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
new file mode 100644
index 0000000..23208f7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -0,0 +1,981 @@
+/*
+ * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Register definitions taken from original Realtek rtl8723au driver
+ */
+
+/* 0x0000 ~ 0x00FF	System Configuration */
+#define REG_SYS_ISO_CTRL		0x0000
+#define  SYS_ISO_MD2PP			BIT(0)
+#define  SYS_ISO_ANALOG_IPS		BIT(5)
+#define  SYS_ISO_DIOR			BIT(9)
+#define  SYS_ISO_PWC_EV25V		BIT(14)
+#define  SYS_ISO_PWC_EV12V		BIT(15)
+
+#define REG_SYS_FUNC			0x0002
+#define  SYS_FUNC_BBRSTB		BIT(0)
+#define  SYS_FUNC_BB_GLB_RSTN		BIT(1)
+#define  SYS_FUNC_USBA			BIT(2)
+#define  SYS_FUNC_UPLL			BIT(3)
+#define  SYS_FUNC_USBD			BIT(4)
+#define  SYS_FUNC_DIO_PCIE		BIT(5)
+#define  SYS_FUNC_PCIEA			BIT(6)
+#define  SYS_FUNC_PPLL			BIT(7)
+#define  SYS_FUNC_PCIED			BIT(8)
+#define  SYS_FUNC_DIOE			BIT(9)
+#define  SYS_FUNC_CPU_ENABLE		BIT(10)
+#define  SYS_FUNC_DCORE			BIT(11)
+#define  SYS_FUNC_ELDR			BIT(12)
+#define  SYS_FUNC_DIO_RF		BIT(13)
+#define  SYS_FUNC_HWPDN			BIT(14)
+#define  SYS_FUNC_MREGEN		BIT(15)
+
+#define REG_APS_FSMCO			0x0004
+#define  APS_FSMCO_PFM_ALDN		BIT(1)
+#define  APS_FSMCO_PFM_WOWL		BIT(3)
+#define  APS_FSMCO_ENABLE_POWERDOWN	BIT(4)
+#define  APS_FSMCO_MAC_ENABLE		BIT(8)
+#define  APS_FSMCO_MAC_OFF		BIT(9)
+#define  APS_FSMCO_HW_SUSPEND		BIT(11)
+#define  APS_FSMCO_PCIE			BIT(12)
+#define  APS_FSMCO_HW_POWERDOWN		BIT(15)
+#define  APS_FSMCO_WLON_RESET		BIT(16)
+
+#define REG_SYS_CLKR			0x0008
+#define  SYS_CLK_ANAD16V_ENABLE		BIT(0)
+#define  SYS_CLK_ANA8M			BIT(1)
+#define  SYS_CLK_MACSLP			BIT(4)
+#define  SYS_CLK_LOADER_ENABLE		BIT(5)
+#define  SYS_CLK_80M_SSC_DISABLE	BIT(7)
+#define  SYS_CLK_80M_SSC_ENABLE_HO	BIT(8)
+#define  SYS_CLK_PHY_SSC_RSTB		BIT(9)
+#define  SYS_CLK_SEC_CLK_ENABLE		BIT(10)
+#define  SYS_CLK_MAC_CLK_ENABLE		BIT(11)
+#define  SYS_CLK_ENABLE			BIT(12)
+#define  SYS_CLK_RING_CLK_ENABLE	BIT(13)
+
+#define REG_9346CR			0x000a
+#define  EEPROM_BOOT			BIT(4)
+#define  EEPROM_ENABLE			BIT(5)
+
+#define REG_EE_VPD			0x000c
+#define REG_AFE_MISC			0x0010
+#define REG_SPS0_CTRL			0x0011
+#define REG_SPS_OCP_CFG			0x0018
+#define REG_RSV_CTRL			0x001c
+
+#define REG_RF_CTRL			0x001f
+#define  RF_ENABLE			BIT(0)
+#define  RF_RSTB			BIT(1)
+#define  RF_SDMRSTB			BIT(2)
+
+#define REG_LDOA15_CTRL			0x0020
+#define  LDOA15_ENABLE			BIT(0)
+#define  LDOA15_STANDBY			BIT(1)
+#define  LDOA15_OBUF			BIT(2)
+#define  LDOA15_REG_VOS			BIT(3)
+#define  LDOA15_VOADJ_SHIFT		4
+
+#define REG_LDOV12D_CTRL		0x0021
+#define  LDOV12D_ENABLE			BIT(0)
+#define  LDOV12D_STANDBY		BIT(1)
+#define  LDOV12D_VADJ_SHIFT		4
+
+#define REG_LDOHCI12_CTRL		0x0022
+
+#define REG_LPLDO_CTRL			0x0023
+#define  LPLDO_HSM			BIT(2)
+#define  LPLDO_LSM_DIS			BIT(3)
+
+#define REG_AFE_XTAL_CTRL		0x0024
+#define  AFE_XTAL_ENABLE		BIT(0)
+#define  AFE_XTAL_B_SELECT		BIT(1)
+#define  AFE_XTAL_GATE_USB		BIT(8)
+#define  AFE_XTAL_GATE_AFE		BIT(11)
+#define  AFE_XTAL_RF_GATE		BIT(14)
+#define  AFE_XTAL_GATE_DIG		BIT(17)
+#define  AFE_XTAL_BT_GATE		BIT(20)
+
+#define REG_AFE_PLL_CTRL		0x0028
+#define  AFE_PLL_ENABLE			BIT(0)
+#define  AFE_PLL_320_ENABLE		BIT(1)
+#define  APE_PLL_FREF_SELECT		BIT(2)
+#define  AFE_PLL_EDGE_SELECT		BIT(3)
+#define  AFE_PLL_WDOGB			BIT(4)
+#define  AFE_PLL_LPF_ENABLE		BIT(5)
+
+#define REG_MAC_PHY_CTRL		0x002c
+
+#define REG_EFUSE_CTRL			0x0030
+#define REG_EFUSE_TEST			0x0034
+#define  EFUSE_TRPT			BIT(7)
+	/*  00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define  EFUSE_CELL_SEL			(BIT(8) | BIT(9))
+#define  EFUSE_LDOE25_ENABLE		BIT(31)
+#define  EFUSE_SELECT_MASK		0x0300
+#define  EFUSE_WIFI_SELECT		0x0000
+#define  EFUSE_BT0_SELECT		0x0100
+#define  EFUSE_BT1_SELECT		0x0200
+#define  EFUSE_BT2_SELECT		0x0300
+
+#define  EFUSE_ACCESS_ENABLE		0x69	/* RTL8723 only */
+#define  EFUSE_ACCESS_DISABLE		0x00	/* RTL8723 only */
+
+#define REG_PWR_DATA			0x0038
+#define REG_CAL_TIMER			0x003c
+#define REG_ACLK_MON			0x003e
+#define REG_GPIO_MUXCFG			0x0040
+#define REG_GPIO_IO_SEL			0x0042
+#define REG_MAC_PINMUX_CFG		0x0043
+#define REG_GPIO_PIN_CTRL		0x0044
+#define REG_GPIO_INTM			0x0048
+#define REG_LEDCFG0			0x004c
+#define REG_LEDCFG1			0x004d
+#define REG_LEDCFG2			0x004e
+#define  LEDCFG2_DPDT_SELECT		BIT(7)
+#define REG_LEDCFG3			0x004f
+#define REG_LEDCFG			REG_LEDCFG2
+#define REG_FSIMR			0x0050
+#define REG_FSISR			0x0054
+#define REG_HSIMR			0x0058
+#define REG_HSISR			0x005c
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */
+#define REG_GPIO_PIN_CTRL_2		0x0060
+/*  RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */
+#define REG_GPIO_IO_SEL_2		0x0062
+
+/*  RTL8723 only WIFI/BT/GPS Multi-Function control source. */
+#define REG_MULTI_FUNC_CTRL		0x0068
+
+#define  MULTI_FN_WIFI_HW_PWRDOWN_EN	BIT(0)	/* Enable GPIO[9] as WiFi HW
+						   powerdown source */
+#define  MULTI_FN_WIFI_HW_PWRDOWN_SL	BIT(1)	/* WiFi HW powerdown polarity
+						   control */
+#define  MULTI_WIFI_FUNC_EN		BIT(2)	/* WiFi function enable */
+
+#define  MULTI_WIFI_HW_ROF_EN		BIT(3)	/* Enable GPIO[9] as WiFi RF HW
+						   powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_EN		BIT(16)	/* Enable GPIO[11] as BT HW
+						   powerdown source */
+#define  MULTI_BT_HW_PWRDOWN_SL		BIT(17)	/* BT HW powerdown polarity
+						   control */
+#define  MULTI_BT_FUNC_EN		BIT(18)	/* BT function enable */
+#define  MULTI_BT_HW_ROF_EN		BIT(19)	/* Enable GPIO[11] as BT/GPS
+						   RF HW powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_EN	BIT(20)	/* Enable GPIO[10] as GPS HW
+						   powerdown source */
+#define  MULTI_GPS_HW_PWRDOWN_SL	BIT(21)	/* GPS HW powerdown polarity
+						   control */
+#define  MULTI_GPS_FUNC_EN		BIT(22)	/* GPS function enable */
+
+#define REG_MCU_FW_DL			0x0080
+#define  MCU_FW_DL_ENABLE		BIT(0)
+#define  MCU_FW_DL_READY		BIT(1)
+#define  MCU_FW_DL_CSUM_REPORT		BIT(2)
+#define  MCU_MAC_INIT_READY		BIT(3)
+#define  MCU_BB_INIT_READY		BIT(4)
+#define  MCU_RF_INIT_READY		BIT(5)
+#define  MCU_WINT_INIT_READY		BIT(6)
+#define  MCU_FW_RAM_SEL			BIT(7)	/* 1: RAM, 0: ROM */
+#define  MCU_CP_RESET			BIT(23)
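+
+/*
+ * Illustrative sketch, not part of the original patch: polling the
+ * MCU init bits.  rtl8xxxu_read32() is an assumed register accessor
+ * and the delay value is illustrative; the poll budget comes from
+ * RTL8XXXU_FIRMWARE_POLL_MAX in rtl8xxxu.h.
+ */
+#if 0	/* example only */
+	for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) {
+		if (rtl8xxxu_read32(priv, REG_MCU_FW_DL) &
+		    MCU_WINT_INIT_READY)
+			break;
+		udelay(100);
+	}
+#endif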
+
+#define REG_HMBOX_EXT_0			0x0088
+#define REG_HMBOX_EXT_1			0x008a
+#define REG_HMBOX_EXT_2			0x008c
+#define REG_HMBOX_EXT_3			0x008e
+/*  Host suspend counter on FPGA platform */
+#define REG_HOST_SUSP_CNT		0x00bc
+/*  Efuse access protection for RTL8723 */
+#define REG_EFUSE_ACCESS		0x00cf
+#define REG_BIST_SCAN			0x00d0
+#define REG_BIST_RPT			0x00d4
+#define REG_BIST_ROM_RPT		0x00d8
+#define REG_USB_SIE_INTF		0x00e0
+#define REG_PCIE_MIO_INTF		0x00e4
+#define REG_PCIE_MIO_INTD		0x00e8
+#define REG_HPON_FSM			0x00ec
+#define  HPON_FSM_BONDING_MASK		(BIT(22) | BIT(23))
+#define  HPON_FSM_BONDING_1T2R		BIT(22)
+#define REG_SYS_CFG			0x00f0
+#define  SYS_CFG_XCLK_VLD		BIT(0)
+#define  SYS_CFG_ACLK_VLD		BIT(1)
+#define  SYS_CFG_UCLK_VLD		BIT(2)
+#define  SYS_CFG_PCLK_VLD		BIT(3)
+#define  SYS_CFG_PCIRSTB		BIT(4)
+#define  SYS_CFG_V15_VLD		BIT(5)
+#define  SYS_CFG_TRP_B15V_EN		BIT(7)
+#define  SYS_CFG_SIC_IDLE		BIT(8)
+#define  SYS_CFG_BD_MAC2		BIT(9)
+#define  SYS_CFG_BD_MAC1		BIT(10)
+#define  SYS_CFG_IC_MACPHY_MODE		BIT(11)
+#define  SYS_CFG_CHIP_VER		(BIT(12) | BIT(13) | BIT(14) | BIT(15))
+#define  SYS_CFG_BT_FUNC		BIT(16)
+#define  SYS_CFG_VENDOR_ID		BIT(19)
+#define  SYS_CFG_PAD_HWPD_IDN		BIT(22)
+#define  SYS_CFG_TRP_VAUX_EN		BIT(23)
+#define  SYS_CFG_TRP_BT_EN		BIT(24)
+#define  SYS_CFG_BD_PKG_SEL		BIT(25)
+#define  SYS_CFG_BD_HCI_SEL		BIT(26)
+#define  SYS_CFG_TYPE_ID		BIT(27)
+#define  SYS_CFG_RTL_ID			BIT(23) /*  TestChip ID,
+						    1:Test(RLE); 0:MP(RL) */
+#define  SYS_CFG_SPS_SEL		BIT(24) /*  1: LDO regulator mode;
+						    0: Switching regulator mode */
+#define  SYS_CFG_CHIP_VERSION_MASK	0xf000	/* Bit 12 - 15 */
+#define  SYS_CFG_CHIP_VERSION_SHIFT	12
+
+#define REG_GPIO_OUTSTS			0x00f4	/*  For RTL8723 only. */
+#define  GPIO_EFS_HCI_SEL		(BIT(0) | BIT(1))
+#define  GPIO_PAD_HCI_SEL		(BIT(2) | BIT(3))
+#define  GPIO_HCI_SEL			(BIT(4) | BIT(5))
+#define  GPIO_PKG_SEL_HCI		BIT(6)
+#define  GPIO_FEN_GPS			BIT(7)
+#define  GPIO_FEN_BT			BIT(8)
+#define  GPIO_FEN_WL			BIT(9)
+#define  GPIO_FEN_PCI			BIT(10)
+#define  GPIO_FEN_USB			BIT(11)
+#define  GPIO_BTRF_HWPDN_N		BIT(12)
+#define  GPIO_WLRF_HWPDN_N		BIT(13)
+#define  GPIO_PDN_BT_N			BIT(14)
+#define  GPIO_PDN_GPS_N			BIT(15)
+#define  GPIO_BT_CTL_HWPDN		BIT(16)
+#define  GPIO_GPS_CTL_HWPDN		BIT(17)
+#define  GPIO_PPHY_SUSB			BIT(20)
+#define  GPIO_UPHY_SUSB			BIT(21)
+#define  GPIO_PCI_SUSEN			BIT(22)
+#define  GPIO_USB_SUSEN			BIT(23)
+#define  GPIO_RF_RL_ID			(BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* 0x0100 ~ 0x01FF	MACTOP General Configuration */
+#define REG_CR				0x0100
+#define  CR_HCI_TXDMA_ENABLE		BIT(0)
+#define  CR_HCI_RXDMA_ENABLE		BIT(1)
+#define  CR_TXDMA_ENABLE		BIT(2)
+#define  CR_RXDMA_ENABLE		BIT(3)
+#define  CR_PROTOCOL_ENABLE		BIT(4)
+#define  CR_SCHEDULE_ENABLE		BIT(5)
+#define  CR_MAC_TX_ENABLE		BIT(6)
+#define  CR_MAC_RX_ENABLE		BIT(7)
+#define  CR_SW_BEACON_ENABLE		BIT(8)
+#define  CR_SECURITY_ENABLE		BIT(9)
+#define  CR_CALTIMER_ENABLE		BIT(10)
+
+/* Media Status Register */
+#define REG_MSR				0x0102
+#define  MSR_LINKTYPE_MASK		0x3
+#define  MSR_LINKTYPE_NONE		0x0
+#define  MSR_LINKTYPE_ADHOC		0x1
+#define  MSR_LINKTYPE_STATION		0x2
+#define  MSR_LINKTYPE_AP		0x3
+
+#define REG_PBP				0x0104
+#define  PBP_PAGE_SIZE_RX_SHIFT		0
+#define  PBP_PAGE_SIZE_TX_SHIFT		4
+#define  PBP_PAGE_SIZE_64		0x0
+#define  PBP_PAGE_SIZE_128		0x1
+#define  PBP_PAGE_SIZE_256		0x2
+#define  PBP_PAGE_SIZE_512		0x3
+#define  PBP_PAGE_SIZE_1024		0x4
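+
+/*
+ * Illustrative sketch, not part of the original patch: REG_PBP packs
+ * both page-size codes into one byte, e.g. 128-byte RX and TX pages:
+ */
+#if 0	/* example only */
+	u8 pbp = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
+		 (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
+#endif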
+
+#define REG_TRXDMA_CTRL			0x010c
+#define  TRXDMA_CTRL_VOQ_SHIFT		4
+#define  TRXDMA_CTRL_VIQ_SHIFT		6
+#define  TRXDMA_CTRL_BEQ_SHIFT		8
+#define  TRXDMA_CTRL_BKQ_SHIFT		10
+#define  TRXDMA_CTRL_MGQ_SHIFT		12
+#define  TRXDMA_CTRL_HIQ_SHIFT		14
+#define  TRXDMA_QUEUE_LOW		1
+#define  TRXDMA_QUEUE_NORMAL		2
+#define  TRXDMA_QUEUE_HIGH		3
+
+#define REG_TRXFF_BNDY			0x0114
+#define REG_TRXFF_STATUS		0x0118
+#define REG_RXFF_PTR			0x011c
+#define REG_HIMR			0x0120
+#define REG_HISR			0x0124
+#define REG_HIMRE			0x0128
+#define REG_HISRE			0x012c
+#define REG_CPWM			0x012f
+#define REG_FWIMR			0x0130
+#define REG_FWISR			0x0134
+#define REG_PKTBUF_DBG_CTRL		0x0140
+#define REG_PKTBUF_DBG_DATA_L		0x0144
+#define REG_PKTBUF_DBG_DATA_H		0x0148
+
+#define REG_TC0_CTRL			0x0150
+#define REG_TC1_CTRL			0x0154
+#define REG_TC2_CTRL			0x0158
+#define REG_TC3_CTRL			0x015c
+#define REG_TC4_CTRL			0x0160
+#define REG_TCUNIT_BASE			0x0164
+#define REG_MBIST_START			0x0174
+#define REG_MBIST_DONE			0x0178
+#define REG_MBIST_FAIL			0x017c
+#define REG_C2HEVT_MSG_NORMAL		0x01a0
+#define REG_C2HEVT_CLEAR		0x01af
+#define REG_C2HEVT_MSG_TEST		0x01b8
+#define REG_MCUTST_1			0x01c0
+#define REG_FMTHR			0x01c8
+#define REG_HMTFR			0x01cc
+#define REG_HMBOX_0			0x01d0
+#define REG_HMBOX_1			0x01d4
+#define REG_HMBOX_2			0x01d8
+#define REG_HMBOX_3			0x01dc
+
+#define REG_LLT_INIT			0x01e0
+#define  LLT_OP_INACTIVE		0x0
+#define  LLT_OP_WRITE			(0x1 << 30)
+#define  LLT_OP_READ			(0x2 << 30)
+#define  LLT_OP_MASK			(0x3 << 30)
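+
+/*
+ * Illustrative sketch, not part of the original patch: an LLT access
+ * packs the opcode into bits 30-31 of REG_LLT_INIT; the address and
+ * data field positions used below are assumptions for illustration.
+ */
+#if 0	/* example only */
+static inline u32 example_llt_write_cmd(u32 address, u32 data)
+{
+	return LLT_OP_WRITE | ((address & 0xff) << 8) | (data & 0xff);
+}
+#endif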
+
+#define REG_BB_ACCEESS_CTRL		0x01e8
+#define REG_BB_ACCESS_DATA		0x01ec
+
+/* 0x0200 ~ 0x027F	TXDMA Configuration */
+#define REG_RQPN			0x0200
+#define  RQPN_HI_PQ_SHIFT		0
+#define  RQPN_LO_PQ_SHIFT		8
+#define  RQPN_NORM_PQ_SHIFT		16
+#define  RQPN_LOAD			BIT(31)
+
+#define REG_FIFOPAGE			0x0204
+#define REG_TDECTRL			0x0208
+#define REG_TXDMA_OFFSET_CHK		0x020c
+#define REG_TXDMA_STATUS		0x0210
+#define REG_RQPN_NPQ			0x0214
+
+/* 0x0280 ~ 0x02FF	RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH		0x0280
+#define REG_RXPKT_NUM			0x0284
+#define REG_RXDMA_STATUS		0x0288
+
+#define REG_RF_BB_CMD_ADDR		0x02c0
+#define REG_RF_BB_CMD_DATA		0x02c4
+
+/*  spec version 11 */
+/* 0x0400 ~ 0x047F	Protocol Configuration */
+#define REG_VOQ_INFORMATION		0x0400
+#define REG_VIQ_INFORMATION		0x0404
+#define REG_BEQ_INFORMATION		0x0408
+#define REG_BKQ_INFORMATION		0x040c
+#define REG_MGQ_INFORMATION		0x0410
+#define REG_HGQ_INFORMATION		0x0414
+#define REG_BCNQ_INFORMATION		0x0418
+
+#define REG_CPU_MGQ_INFORMATION		0x041c
+#define REG_FWHW_TXQ_CTRL		0x0420
+#define  FWHW_TXQ_CTRL_AMPDU_RETRY	BIT(7)
+#define  FWHW_TXQ_CTRL_XMIT_MGMT_ACK	BIT(12)
+
+#define REG_HWSEQ_CTRL			0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY		0x0424
+#define REG_TXPKTBUF_MGQ_BDNY		0x0425
+#define REG_LIFETIME_EN			0x0426
+#define REG_MULTI_BCNQ_OFFSET		0x0427
+
+#define REG_SPEC_SIFS			0x0428
+#define  SPEC_SIFS_CCK_MASK		0x00ff
+#define  SPEC_SIFS_CCK_SHIFT		0
+#define  SPEC_SIFS_OFDM_MASK		0xff00
+#define  SPEC_SIFS_OFDM_SHIFT		8
+
+#define REG_RETRY_LIMIT			0x042a
+#define  RETRY_LIMIT_LONG_SHIFT		0
+#define  RETRY_LIMIT_LONG_MASK		0x003f
+#define  RETRY_LIMIT_SHORT_SHIFT	8
+#define  RETRY_LIMIT_SHORT_MASK		0x3f00
+
+#define REG_DARFRC			0x0430
+#define REG_RARFRC			0x0438
+#define REG_RESPONSE_RATE_SET		0x0440
+#define  RESPONSE_RATE_BITMAP_ALL	0xfffff
+#define  RESPONSE_RATE_RRSR_CCK_ONLY_1M	0xffff1
+#define  RSR_1M				BIT(0)
+#define  RSR_2M				BIT(1)
+#define  RSR_5_5M			BIT(2)
+#define  RSR_11M			BIT(3)
+#define  RSR_6M				BIT(4)
+#define  RSR_9M				BIT(5)
+#define  RSR_12M			BIT(6)
+#define  RSR_18M			BIT(7)
+#define  RSR_24M			BIT(8)
+#define  RSR_36M			BIT(9)
+#define  RSR_48M			BIT(10)
+#define  RSR_54M			BIT(11)
+#define  RSR_MCS0			BIT(12)
+#define  RSR_MCS1			BIT(13)
+#define  RSR_MCS2			BIT(14)
+#define  RSR_MCS3			BIT(15)
+#define  RSR_MCS4			BIT(16)
+#define  RSR_MCS5			BIT(17)
+#define  RSR_MCS6			BIT(18)
+#define  RSR_MCS7			BIT(19)
+#define  RSR_RSC_LOWER_SUB_CHANNEL	BIT(21)	/* 0x200000 */
+#define  RSR_RSC_UPPER_SUB_CHANNEL	BIT(22)	/* 0x400000 */
+#define  RSR_RSC_BANDWIDTH_40M		(RSR_RSC_UPPER_SUB_CHANNEL | \
+					 RSR_RSC_LOWER_SUB_CHANNEL)
+#define  RSR_ACK_SHORT_PREAMBLE		BIT(23)
+
+#define REG_ARFR0			0x0444
+#define REG_ARFR1			0x0448
+#define REG_ARFR2			0x044c
+#define REG_ARFR3			0x0450
+#define REG_AGGLEN_LMT			0x0458
+#define REG_AMPDU_MIN_SPACE		0x045c
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD	0x045d
+#define REG_FAST_EDCA_CTRL		0x0460
+#define REG_RD_RESP_PKT_TH		0x0463
+#define REG_INIRTS_RATE_SEL		0x0480
+#define REG_INIDATA_RATE_SEL		0x0484
+
+#define REG_POWER_STATUS		0x04a4
+#define REG_POWER_STAGE1		0x04b4
+#define REG_POWER_STAGE2		0x04b8
+#define REG_PKT_VO_VI_LIFE_TIME		0x04c0
+#define REG_PKT_BE_BK_LIFE_TIME		0x04c2
+#define REG_STBC_SETTING		0x04c4
+#define REG_PROT_MODE_CTRL		0x04c8
+#define REG_MAX_AGGR_NUM		0x04ca
+#define REG_RTS_MAX_AGGR_NUM		0x04cb
+#define REG_BAR_MODE_CTRL		0x04cc
+#define REG_RA_TRY_RATE_AGG_LMT		0x04cf
+#define REG_NQOS_SEQ			0x04dc
+#define REG_QOS_SEQ			0x04de
+#define REG_NEED_CPU_HANDLE		0x04e0
+#define REG_PKT_LOSE_RPT		0x04e1
+#define REG_PTCL_ERR_STATUS		0x04e2
+#define REG_DUMMY			0x04fc
+
+/* 0x0500 ~ 0x05FF	EDCA Configuration */
+#define REG_EDCA_VO_PARAM		0x0500
+#define REG_EDCA_VI_PARAM		0x0504
+#define REG_EDCA_BE_PARAM		0x0508
+#define REG_EDCA_BK_PARAM		0x050c
+#define  EDCA_PARAM_ECW_MIN_SHIFT	8
+#define  EDCA_PARAM_ECW_MAX_SHIFT	12
+#define  EDCA_PARAM_TXOP_SHIFT		16
+#define REG_BEACON_TCFG			0x0510
+#define REG_PIFS			0x0512
+#define REG_RDG_PIFS			0x0513
+#define REG_SIFS_CCK			0x0514
+#define REG_SIFS_OFDM			0x0516
+#define REG_TSFTR_SYN_OFFSET		0x0518
+#define REG_AGGR_BREAK_TIME		0x051a
+#define REG_SLOT			0x051b
+#define REG_TX_PTCL_CTRL		0x0520
+#define REG_TXPAUSE			0x0522
+#define REG_DIS_TXREQ_CLR		0x0523
+#define REG_RD_CTRL			0x0524
+#define REG_TBTT_PROHIBIT		0x0540
+#define REG_RD_NAV_NXT			0x0544
+#define REG_NAV_PROT_LEN		0x0546
+
+#define REG_BEACON_CTRL			0x0550
+#define REG_BEACON_CTRL_1		0x0551
+#define  BEACON_ATIM			BIT(0)
+#define  BEACON_CTRL_MBSSID		BIT(1)
+#define  BEACON_CTRL_TX_BEACON_RPT	BIT(2)
+#define  BEACON_FUNCTION_ENABLE		BIT(3)
+#define  BEACON_DISABLE_TSF_UPDATE	BIT(4)
+
+#define REG_MBID_NUM			0x0552
+#define REG_DUAL_TSF_RST		0x0553
+#define  DUAL_TSF_RESET_TSF0		BIT(0)
+#define  DUAL_TSF_RESET_TSF1		BIT(1)
+#define  DUAL_TSF_RESET_P2P		BIT(4)
+#define  DUAL_TSF_TX_OK			BIT(5)
+
+/*  The same as REG_MBSSID_BCN_SPACE */
+#define REG_BCN_INTERVAL		0x0554
+#define REG_MBSSID_BCN_SPACE		0x0554
+
+#define REG_DRIVER_EARLY_INT		0x0558
+#define  DRIVER_EARLY_INT_TIME		5
+
+#define REG_BEACON_DMA_TIME		0x0559
+#define  BEACON_DMA_ATIME_INT_TIME	2
+
+#define REG_ATIMWND			0x055a
+#define REG_BCN_MAX_ERR			0x055d
+#define REG_RXTSF_OFFSET_CCK		0x055e
+#define REG_RXTSF_OFFSET_OFDM		0x055f
+#define REG_TSFTR			0x0560
+#define REG_TSFTR1			0x0568
+#define REG_INIT_TSFTR			0x0564
+#define REG_ATIMWND_1			0x0570
+#define REG_PSTIMER			0x0580
+#define REG_TIMER0			0x0584
+#define REG_TIMER1			0x0588
+#define REG_ACM_HW_CTRL			0x05c0
+#define  ACM_HW_CTRL_BK			BIT(0)
+#define  ACM_HW_CTRL_BE			BIT(1)
+#define  ACM_HW_CTRL_VI			BIT(2)
+#define  ACM_HW_CTRL_VO			BIT(3)
+#define REG_ACM_RST_CTRL		0x05c1
+#define REG_ACMAVG			0x05c2
+#define REG_VO_ADMTIME			0x05c4
+#define REG_VI_ADMTIME			0x05c6
+#define REG_BE_ADMTIME			0x05c8
+#define REG_EDCA_RANDOM_GEN		0x05cc
+#define REG_SCH_TXCMD			0x05d0
+
+/* define REG_FW_TSF_SYNC_CNT		0x04a0 */
+#define REG_FW_RESET_TSF_CNT_1		0x05fc
+#define REG_FW_RESET_TSF_CNT_0		0x05fd
+#define REG_FW_BCN_DIS_CNT		0x05fe
+
+/* 0x0600 ~ 0x07FF  WMAC Configuration */
+#define REG_APSD_CTRL			0x0600
+#define  APSD_CTRL_OFF			BIT(6)
+#define  APSD_CTRL_OFF_STATUS		BIT(7)
+#define REG_BW_OPMODE			0x0603
+#define  BW_OPMODE_20MHZ		BIT(2)
+#define  BW_OPMODE_5G			BIT(1)
+#define  BW_OPMODE_11J			BIT(0)
+
+#define REG_TCR				0x0604
+
+/* Receive Configuration Register */
+#define REG_RCR				0x0608
+#define  RCR_ACCEPT_AP			BIT(0)  /* Accept all unicast packet */
+#define  RCR_ACCEPT_PHYS_MATCH		BIT(1)  /* Accept phys match packet */
+#define  RCR_ACCEPT_MCAST		BIT(2)
+#define  RCR_ACCEPT_BCAST		BIT(3)
+#define  RCR_ACCEPT_ADDR3		BIT(4)  /* Accept address 3 match
+						 packet */
+#define  RCR_ACCEPT_PM			BIT(5)  /* Accept power management
+						 packet */
+#define  RCR_CHECK_BSSID_MATCH		BIT(6)  /* Accept BSSID match packet */
+#define  RCR_CHECK_BSSID_BEACON		BIT(7)  /* Accept BSSID match packet
+						 (Rx beacon, probe rsp) */
+#define  RCR_ACCEPT_CRC32		BIT(8)  /* Accept CRC32 error packet */
+#define  RCR_ACCEPT_ICV			BIT(9)  /* Accept ICV error packet */
+#define  RCR_ACCEPT_DATA_FRAME		BIT(11)
+#define  RCR_ACCEPT_CTRL_FRAME		BIT(12)
+#define  RCR_ACCEPT_MGMT_FRAME		BIT(13)
+#define  RCR_HTC_LOC_CTRL		BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */
+#define  RCR_MFBEN			BIT(22)
+#define  RCR_LSIGEN			BIT(23)
+#define  RCR_MULTI_BSSID_ENABLE		BIT(24) /* Enable Multiple BssId */
+#define  RCR_ACCEPT_BA_SSN		BIT(27) /* Accept BA SSN */
+#define  RCR_APPEND_PHYSTAT		BIT(28)
+#define  RCR_APPEND_ICV			BIT(29)
+#define  RCR_APPEND_MIC			BIT(30)
+#define  RCR_APPEND_FCS			BIT(31) /* WMAC appends FCS after
+						 the frame */
+
+#define REG_RX_PKT_LIMIT		0x060c
+#define REG_RX_DLK_TIME			0x060d
+#define REG_RX_DRVINFO_SZ		0x060f
+
+#define REG_MACID			0x0610
+#define REG_BSSID			0x0618
+#define REG_MAR				0x0620
+#define REG_MBIDCAMCFG			0x0628
+
+#define REG_USTIME_EDCA			0x0638
+#define REG_MAC_SPEC_SIFS		0x063a
+
+/*  20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+	/*  [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS			0x063c
+	/*  [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS			0x063e
+#define REG_ACKTO			0x0640
+#define REG_CTS2TO			0x0641
+#define REG_EIFS			0x0642
+
+/* WMA, BA, CCX */
+#define REG_NAV_CTRL			0x0650
+/* In units of 128us */
+#define REG_NAV_UPPER			0x0652
+#define  NAV_UPPER_UNIT			128
+
+#define REG_BACAMCMD			0x0654
+#define REG_BACAMCONTENT		0x0658
+#define REG_LBDLY			0x0660
+#define REG_FWDLY			0x0661
+#define REG_RXERR_RPT			0x0664
+#define REG_WMAC_TRXPTCL_CTL		0x0668
+
+/*  Security */
+#define REG_CAM_CMD			0x0670
+#define  CAM_CMD_POLLING		BIT(31)
+#define  CAM_CMD_WRITE			BIT(16)
+#define  CAM_CMD_KEY_SHIFT		3
+#define REG_CAM_WRITE			0x0674
+#define  CAM_WRITE_VALID		BIT(15)
+#define REG_CAM_READ			0x0678
+#define REG_CAM_DEBUG			0x067c
+#define REG_SECURITY_CFG		0x0680
+#define  SEC_CFG_TX_USE_DEFKEY		BIT(0)
+#define  SEC_CFG_RX_USE_DEFKEY		BIT(1)
+#define  SEC_CFG_TX_SEC_ENABLE		BIT(2)
+#define  SEC_CFG_RX_SEC_ENABLE		BIT(3)
+#define  SEC_CFG_SKBYA2			BIT(4)
+#define  SEC_CFG_NO_SKMC		BIT(5)
+#define  SEC_CFG_TXBC_USE_DEFKEY	BIT(6)
+#define  SEC_CFG_RXBC_USE_DEFKEY	BIT(7)
+
+/*  Power */
+#define REG_WOW_CTRL			0x0690
+#define REG_PSSTATUS			0x0691
+#define REG_PS_RX_INFO			0x0692
+#define REG_LPNAV_CTRL			0x0694
+#define REG_WKFMCAM_CMD			0x0698
+#define REG_WKFMCAM_RWD			0x069c
+#define REG_RXFLTMAP0			0x06a0
+#define REG_RXFLTMAP1			0x06a2
+#define REG_RXFLTMAP2			0x06a4
+#define REG_BCN_PSR_RPT			0x06a8
+#define REG_CALB32K_CTRL		0x06ac
+#define REG_PKT_MON_CTRL		0x06b4
+#define REG_BT_COEX_TABLE		0x06c0
+#define REG_WMAC_RESP_TXINFO		0x06d8
+
+#define REG_MACID1			0x0700
+#define REG_BSSID1			0x0708
+
+#define REG_FPGA0_RF_MODE		0x0800
+#define  FPGA_RF_MODE			BIT(0)
+#define  FPGA_RF_MODE_JAPAN		BIT(1)
+#define  FPGA_RF_MODE_CCK		BIT(24)
+#define  FPGA_RF_MODE_OFDM		BIT(25)
+
+#define REG_FPGA0_TX_INFO		0x0804
+#define REG_FPGA0_PSD_FUNC		0x0808
+#define REG_FPGA0_TX_GAIN		0x080c
+#define REG_FPGA0_RF_TIMING1		0x0810
+#define REG_FPGA0_RF_TIMING2		0x0814
+#define REG_FPGA0_POWER_SAVE		0x0818
+#define  FPGA0_PS_LOWER_CHANNEL		BIT(26)
+#define  FPGA0_PS_UPPER_CHANNEL		BIT(27)
+
+#define REG_FPGA0_XA_HSSI_PARM1		0x0820	/* RF 3 wire register */
+#define  FPGA0_HSSI_PARM1_PI		BIT(8)
+#define REG_FPGA0_XA_HSSI_PARM2		0x0824
+#define REG_FPGA0_XB_HSSI_PARM1		0x0828
+#define REG_FPGA0_XB_HSSI_PARM2		0x082c
+#define  FPGA0_HSSI_3WIRE_DATA_LEN	0x800
+#define  FPGA0_HSSI_3WIRE_ADDR_LEN	0x400
+#define  FPGA0_HSSI_PARM2_ADDR_SHIFT	23
+#define  FPGA0_HSSI_PARM2_ADDR_MASK	0x7f800000	/* 0xff << 23 */
+#define  FPGA0_HSSI_PARM2_CCK_HIGH_PWR	BIT(9)
+#define  FPGA0_HSSI_PARM2_EDGE_READ	BIT(31)
+
+#define REG_TX_AGC_B_RATE18_06		0x0830
+#define REG_TX_AGC_B_RATE54_24		0x0834
+#define REG_TX_AGC_B_CCK1_55_MCS32	0x0838
+#define REG_TX_AGC_B_MCS03_MCS00	0x083c
+
+#define REG_FPGA0_XA_LSSI_PARM		0x0840
+#define REG_FPGA0_XB_LSSI_PARM		0x0844
+#define  FPGA0_LSSI_PARM_ADDR_SHIFT	20
+#define  FPGA0_LSSI_PARM_ADDR_MASK	0x0ff00000
+#define  FPGA0_LSSI_PARM_DATA_MASK	0x000fffff
+
+#define REG_TX_AGC_B_MCS07_MCS04	0x0848
+#define REG_TX_AGC_B_MCS11_MCS08	0x084c
+
+#define REG_FPGA0_XCD_SWITCH_CTRL	0x085c
+
+#define REG_FPGA0_XA_RF_INT_OE		0x0860	/* RF Channel switch */
+#define REG_FPGA0_XB_RF_INT_OE		0x0864
+#define  FPGA0_INT_OE_ANTENNA_AB_OPEN	0x000
+#define  FPGA0_INT_OE_ANTENNA_A		BIT(8)
+#define  FPGA0_INT_OE_ANTENNA_B		BIT(9)
+#define  FPGA0_INT_OE_ANTENNA_MASK	(FPGA0_INT_OE_ANTENNA_A | \
+					 FPGA0_INT_OE_ANTENNA_B)
+
+#define REG_TX_AGC_B_MCS15_MCS12	0x0868
+#define REG_TX_AGC_B_CCK11_A_CCK2_11	0x086c
+
+#define REG_FPGA0_XAB_RF_SW_CTRL	0x0870
+#define REG_FPGA0_XA_RF_SW_CTRL		0x0870	/* 16 bit */
+#define REG_FPGA0_XB_RF_SW_CTRL		0x0872	/* 16 bit */
+#define REG_FPGA0_XCD_RF_SW_CTRL	0x0874
+#define REG_FPGA0_XC_RF_SW_CTRL		0x0874	/* 16 bit */
+#define REG_FPGA0_XD_RF_SW_CTRL		0x0876	/* 16 bit */
+#define  FPGA0_RF_3WIRE_DATA		BIT(0)
+#define  FPGA0_RF_3WIRE_CLOC		BIT(1)
+#define  FPGA0_RF_3WIRE_LOAD		BIT(2)
+#define  FPGA0_RF_3WIRE_RW		BIT(3)
+#define  FPGA0_RF_3WIRE_MASK		0xf
+#define  FPGA0_RF_RFENV			BIT(4)
+#define  FPGA0_RF_TRSW			BIT(5)	/* Useless now */
+#define  FPGA0_RF_TRSWB			BIT(6)
+#define  FPGA0_RF_ANTSW			BIT(8)
+#define  FPGA0_RF_ANTSWB		BIT(9)
+#define  FPGA0_RF_PAPE			BIT(10)
+#define  FPGA0_RF_PAPE5G		BIT(11)
+#define  FPGA0_RF_BD_CTRL_SHIFT		16
+
+#define REG_FPGA0_XAB_RF_PARM		0x0878	/* Antenna select path in ODM */
+#define REG_FPGA0_XA_RF_PARM		0x0878	/* 16 bit */
+#define REG_FPGA0_XB_RF_PARM		0x087a	/* 16 bit */
+#define REG_FPGA0_XCD_RF_PARM		0x087c
+#define REG_FPGA0_XC_RF_PARM		0x087c	/* 16 bit */
+#define REG_FPGA0_XD_RF_PARM		0x087e	/* 16 bit */
+#define  FPGA0_RF_PARM_RFA_ENABLE	BIT(1)
+#define  FPGA0_RF_PARM_RFB_ENABLE	BIT(17)
+#define  FPGA0_RF_PARM_CLK_GATE		BIT(31)
+
+#define REG_FPGA0_ANALOG1		0x0880
+#define REG_FPGA0_ANALOG2		0x0884
+#define  FPGA0_ANALOG2_20MHZ		BIT(10)
+#define REG_FPGA0_ANALOG3		0x0888
+#define REG_FPGA0_ANALOG4		0x088c
+
+#define REG_FPGA0_XA_LSSI_READBACK	0x08a0	/* Transceiver LSSI Readback */
+#define REG_FPGA0_XB_LSSI_READBACK	0x08a4
+#define REG_HSPI_XA_READBACK		0x08b8	/* Transceiver A HSPI read */
+#define REG_HSPI_XB_READBACK		0x08bc	/* Transceiver B HSPI read */
+
+#define REG_FPGA1_RF_MODE		0x0900
+
+#define REG_FPGA1_TX_INFO		0x090c
+
+#define REG_CCK0_SYSTEM			0x0a00
+#define  CCK0_SIDEBAND			BIT(4)
+
+#define REG_CCK0_AFE_SETTING		0x0a04
+
+#define REG_CONFIG_ANT_A		0x0b68
+#define REG_CONFIG_ANT_B		0x0b6c
+
+#define REG_OFDM0_TRX_PATH_ENABLE	0x0c04
+#define OFDM_RF_PATH_RX_MASK		0x0f
+#define OFDM_RF_PATH_RX_A		BIT(0)
+#define OFDM_RF_PATH_RX_B		BIT(1)
+#define OFDM_RF_PATH_RX_C		BIT(2)
+#define OFDM_RF_PATH_RX_D		BIT(3)
+#define OFDM_RF_PATH_TX_MASK		0xf0
+#define OFDM_RF_PATH_TX_A		BIT(4)
+#define OFDM_RF_PATH_TX_B		BIT(5)
+#define OFDM_RF_PATH_TX_C		BIT(6)
+#define OFDM_RF_PATH_TX_D		BIT(7)
+
+#define REG_OFDM0_TR_MUX_PAR		0x0c08
+
+#define REG_OFDM0_XA_RX_IQ_IMBALANCE	0x0c14
+#define REG_OFDM0_XB_RX_IQ_IMBALANCE	0x0c1c
+
+#define REG_OFDM0_ENERGY_CCA_THRES	0x0c4c
+
+#define REG_OFDM0_XA_AGC_CORE1		0x0c50
+#define REG_OFDM0_XA_AGC_CORE2		0x0c54
+#define REG_OFDM0_XB_AGC_CORE1		0x0c58
+#define REG_OFDM0_XB_AGC_CORE2		0x0c5c
+#define REG_OFDM0_XC_AGC_CORE1		0x0c60
+#define REG_OFDM0_XC_AGC_CORE2		0x0c64
+#define REG_OFDM0_XD_AGC_CORE1		0x0c68
+#define REG_OFDM0_XD_AGC_CORE2		0x0c6c
+#define  OFDM0_X_AGC_CORE1_IGI_MASK	0x0000007F
+
+#define REG_OFDM0_AGC_PARM1		0x0c70
+
+#define REG_OFDM0_AGCR_SSI_TABLE	0x0c78
+
+#define REG_OFDM0_XA_TX_IQ_IMBALANCE	0x0c80
+#define REG_OFDM0_XB_TX_IQ_IMBALANCE	0x0c88
+#define REG_OFDM0_XC_TX_IQ_IMBALANCE	0x0c90
+#define REG_OFDM0_XD_TX_IQ_IMBALANCE	0x0c98
+
+#define REG_OFDM0_XC_TX_AFE		0x0c94
+#define REG_OFDM0_XD_TX_AFE		0x0c9c
+
+#define REG_OFDM0_RX_IQ_EXT_ANTA	0x0ca0
+
+#define REG_OFDM1_LSTF			0x0d00
+#define  OFDM_LSTF_PRIME_CH_LOW		BIT(10)
+#define  OFDM_LSTF_PRIME_CH_HIGH	BIT(11)
+#define  OFDM_LSTF_PRIME_CH_MASK	(OFDM_LSTF_PRIME_CH_LOW | \
+					 OFDM_LSTF_PRIME_CH_HIGH)
+#define  OFDM_LSTF_CONTINUE_TX		BIT(28)
+#define  OFDM_LSTF_SINGLE_CARRIER	BIT(29)
+#define  OFDM_LSTF_SINGLE_TONE		BIT(30)
+#define  OFDM_LSTF_MASK			0x70000000
+
+#define REG_OFDM1_TRX_PATH_ENABLE	0x0d04
+
+#define REG_TX_AGC_A_RATE18_06		0x0e00
+#define REG_TX_AGC_A_RATE54_24		0x0e04
+#define REG_TX_AGC_A_CCK1_MCS32		0x0e08
+#define REG_TX_AGC_A_MCS03_MCS00	0x0e10
+#define REG_TX_AGC_A_MCS07_MCS04	0x0e14
+#define REG_TX_AGC_A_MCS11_MCS08	0x0e18
+#define REG_TX_AGC_A_MCS15_MCS12	0x0e1c
+
+#define REG_FPGA0_IQK			0x0e28
+
+#define REG_TX_IQK_TONE_A		0x0e30
+#define REG_RX_IQK_TONE_A		0x0e34
+#define REG_TX_IQK_PI_A			0x0e38
+#define REG_RX_IQK_PI_A			0x0e3c
+
+#define REG_TX_IQK			0x0e40
+#define REG_RX_IQK			0x0e44
+#define REG_IQK_AGC_PTS			0x0e48
+#define REG_IQK_AGC_RSP			0x0e4c
+#define REG_TX_IQK_TONE_B		0x0e50
+#define REG_RX_IQK_TONE_B		0x0e54
+#define REG_TX_IQK_PI_B			0x0e58
+#define REG_RX_IQK_PI_B			0x0e5c
+#define REG_IQK_AGC_CONT		0x0e60
+
+#define REG_BLUETOOTH			0x0e6c
+#define REG_RX_WAIT_CCA			0x0e70
+#define REG_TX_CCK_RFON			0x0e74
+#define REG_TX_CCK_BBON			0x0e78
+#define REG_TX_OFDM_RFON		0x0e7c
+#define REG_TX_OFDM_BBON		0x0e80
+#define REG_TX_TO_RX			0x0e84
+#define REG_TX_TO_TX			0x0e88
+#define REG_RX_CCK			0x0e8c
+
+#define REG_TX_POWER_BEFORE_IQK_A	0x0e94
+#define REG_TX_POWER_AFTER_IQK_A	0x0e9c
+
+#define REG_RX_POWER_BEFORE_IQK_A	0x0ea0
+#define REG_RX_POWER_BEFORE_IQK_A_2	0x0ea4
+#define REG_RX_POWER_AFTER_IQK_A	0x0ea8
+#define REG_RX_POWER_AFTER_IQK_A_2	0x0eac
+
+#define REG_TX_POWER_BEFORE_IQK_B	0x0eb4
+#define REG_TX_POWER_AFTER_IQK_B	0x0ebc
+
+#define REG_RX_POWER_BEFORE_IQK_B	0x0ec0
+#define REG_RX_POWER_BEFORE_IQK_B_2	0x0ec4
+#define REG_RX_POWER_AFTER_IQK_B	0x0ec8
+#define REG_RX_POWER_AFTER_IQK_B_2	0x0ecc
+
+#define REG_RX_OFDM			0x0ed0
+#define REG_RX_WAIT_RIFS		0x0ed4
+#define REG_RX_TO_RX			0x0ed8
+#define REG_STANDBY			0x0edc
+#define REG_SLEEP			0x0ee0
+#define REG_PMPD_ANAEN			0x0eec
+
+#define REG_FW_START_ADDRESS		0x1000
+
+#define REG_USB_INFO			0xfe17
+#define REG_USB_HIMR			0xfe38
+#define  USB_HIMR_TIMEOUT2		BIT(31)
+#define  USB_HIMR_TIMEOUT1		BIT(30)
+#define  USB_HIMR_PSTIMEOUT		BIT(29)
+#define  USB_HIMR_GTINT4		BIT(28)
+#define  USB_HIMR_GTINT3		BIT(27)
+#define  USB_HIMR_TXBCNERR		BIT(26)
+#define  USB_HIMR_TXBCNOK		BIT(25)
+#define  USB_HIMR_TSF_BIT32_TOGGLE	BIT(24)
+#define  USB_HIMR_BCNDMAINT3		BIT(23)
+#define  USB_HIMR_BCNDMAINT2		BIT(22)
+#define  USB_HIMR_BCNDMAINT1		BIT(21)
+#define  USB_HIMR_BCNDMAINT0		BIT(20)
+#define  USB_HIMR_BCNDOK3		BIT(19)
+#define  USB_HIMR_BCNDOK2		BIT(18)
+#define  USB_HIMR_BCNDOK1		BIT(17)
+#define  USB_HIMR_BCNDOK0		BIT(16)
+#define  USB_HIMR_HSISR_IND		BIT(15)
+#define  USB_HIMR_BCNDMAINT_E		BIT(14)
+/* RSVD	BIT(13) */
+#define  USB_HIMR_CTW_END		BIT(12)
+/* RSVD	BIT(11) */
+#define  USB_HIMR_C2HCMD		BIT(10)
+#define  USB_HIMR_CPWM2			BIT(9)
+#define  USB_HIMR_CPWM			BIT(8)
+#define  USB_HIMR_HIGHDOK		BIT(7)	/*  High Queue DMA OK
+						    Interrupt */
+#define  USB_HIMR_MGNTDOK		BIT(6)	/*  Management Queue DMA OK
+						    Interrupt */
+#define  USB_HIMR_BKDOK			BIT(5)	/*  AC_BK DMA OK Interrupt */
+#define  USB_HIMR_BEDOK			BIT(4)	/*  AC_BE DMA OK Interrupt */
+#define  USB_HIMR_VIDOK			BIT(3)	/*  AC_VI DMA OK Interrupt */
+#define  USB_HIMR_VODOK			BIT(2)	/*  AC_VO DMA OK Interrupt */
+#define  USB_HIMR_RDU			BIT(1)	/*  Receive Descriptor
+						    Unavailable */
+#define  USB_HIMR_ROK			BIT(0)	/*  Receive DMA OK Interrupt */
+
+#define REG_USB_SPECIAL_OPTION		0xfe55
+#define REG_USB_DMA_AGG_TO		0xfe5b
+#define REG_USB_AGG_TO			0xfe5c
+#define REG_USB_AGG_TH			0xfe5d
+
+#define REG_NORMAL_SIE_VID		0xfe60	/* 0xfe60 - 0xfe61 */
+#define REG_NORMAL_SIE_PID		0xfe62	/* 0xfe62 - 0xfe63 */
+#define REG_NORMAL_SIE_OPTIONAL		0xfe64
+#define REG_NORMAL_SIE_EP		0xfe65	/* 0xfe65 - 0xfe67 */
+#define REG_NORMAL_SIE_EP_TX		0xfe66
+#define  NORMAL_SIE_EP_TX_HIGH_MASK	0x000f
+#define  NORMAL_SIE_EP_TX_NORMAL_MASK	0x00f0
+#define  NORMAL_SIE_EP_TX_LOW_MASK	0x0f00
+
+#define REG_NORMAL_SIE_PHY		0xfe68	/* 0xfe68 - 0xfe6b */
+#define REG_NORMAL_SIE_OPTIONAL2	0xfe6c
+#define REG_NORMAL_SIE_GPS_EP		0xfe6d	/* RTL8723 only */
+#define REG_NORMAL_SIE_MAC_ADDR		0xfe70	/* 0xfe70 - 0xfe75 */
+#define REG_NORMAL_SIE_STRING		0xfe80	/* 0xfe80 - 0xfedf */
+
+/* RF6052 registers */
+#define RF6052_REG_AC			0x00
+#define RF6052_REG_IQADJ_G1		0x01
+#define RF6052_REG_IQADJ_G2		0x02
+#define RF6052_REG_BS_PA_APSET_G1_G4	0x03
+#define RF6052_REG_BS_PA_APSET_G5_G8	0x04
+#define RF6052_REG_POW_TRSW		0x05
+#define RF6052_REG_GAIN_RX		0x06
+#define RF6052_REG_GAIN_TX		0x07
+#define RF6052_REG_TXM_IDAC		0x08
+#define RF6052_REG_IPA_G		0x09
+#define RF6052_REG_TXBIAS_G		0x0a
+#define RF6052_REG_TXPA_AG		0x0b
+#define RF6052_REG_IPA_A		0x0c
+#define RF6052_REG_TXBIAS_A		0x0d
+#define RF6052_REG_BS_PA_APSET_G9_G11	0x0e
+#define RF6052_REG_BS_IQGEN		0x0f
+#define RF6052_REG_MODE1		0x10
+#define RF6052_REG_MODE2		0x11
+#define RF6052_REG_RX_AGC_HP		0x12
+#define RF6052_REG_TX_AGC		0x13
+#define RF6052_REG_BIAS			0x14
+#define RF6052_REG_IPA			0x15
+#define RF6052_REG_TXBIAS		0x16
+#define RF6052_REG_POW_ABILITY		0x17
+#define RF6052_REG_MODE_AG		0x18	/* RF channel and BW switch */
+#define  MODE_AG_CHANNEL_MASK		0x3ff
+#define  MODE_AG_CHANNEL_20MHZ		BIT(10)
+
+#define RF6052_REG_TOP			0x19
+#define RF6052_REG_RX_G1		0x1a
+#define RF6052_REG_RX_G2		0x1b
+#define RF6052_REG_RX_BB2		0x1c
+#define RF6052_REG_RX_BB1		0x1d
+#define RF6052_REG_RCK1			0x1e
+#define RF6052_REG_RCK2			0x1f
+#define RF6052_REG_TX_G1		0x20
+#define RF6052_REG_TX_G2		0x21
+#define RF6052_REG_TX_G3		0x22
+#define RF6052_REG_TX_BB1		0x23
+#define RF6052_REG_T_METER		0x24
+#define RF6052_REG_SYN_G1		0x25	/* RF TX Power control */
+#define RF6052_REG_SYN_G2		0x26	/* RF TX Power control */
+#define RF6052_REG_SYN_G3		0x27	/* RF TX Power control */
+#define RF6052_REG_SYN_G4		0x28	/* RF TX Power control */
+#define RF6052_REG_SYN_G5		0x29	/* RF TX Power control */
+#define RF6052_REG_SYN_G6		0x2a	/* RF TX Power control */
+#define RF6052_REG_SYN_G7		0x2b	/* RF TX Power control */
+#define RF6052_REG_SYN_G8		0x2c	/* RF TX Power control */
+
+#define RF6052_REG_RCK_OS		0x30	/* RF TX PA control */
+
+#define RF6052_REG_TXPA_G1		0x31	/* RF TX PA control */
+#define RF6052_REG_TXPA_G2		0x32	/* RF TX PA control */
+#define RF6052_REG_TXPA_G3		0x33	/* RF TX PA control */
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Kconfig
rename to drivers/net/wireless/realtek/rtlwifi/Kconfig
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/Makefile
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.c
rename to drivers/net/wireless/realtek/rtlwifi/base.c
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/base.h
rename to drivers/net/wireless/realtek/rtlwifi/base.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8192e2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a1ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtc8821a2ant.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.c
rename to drivers/net/wireless/realtek/rtlwifi/cam.c
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/cam.h
rename to drivers/net/wireless/realtek/rtlwifi/cam.h
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/core.c
rename to drivers/net/wireless/realtek/rtlwifi/core.c
index 585d088..c925a4d 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1373,7 +1373,7 @@
 			       struct ieee80211_vif *vif,
 			       enum ieee80211_ampdu_mlme_action action,
 			       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			       u8 buf_size)
+			       u8 buf_size, bool amsdu)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/core.h
rename to drivers/net/wireless/realtek/rtlwifi/core.h
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.c
rename to drivers/net/wireless/realtek/rtlwifi/debug.c
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/debug.h
rename to drivers/net/wireless/realtek/rtlwifi/debug.h
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.c
rename to drivers/net/wireless/realtek/rtlwifi/efuse.c
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/efuse.h
rename to drivers/net/wireless/realtek/rtlwifi/efuse.h
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pci.c
rename to drivers/net/wireless/realtek/rtlwifi/pci.c
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
similarity index 99%
rename from drivers/net/wireless/rtlwifi/pci.h
rename to drivers/net/wireless/realtek/rtlwifi/pci.h
index d4567d1..5da6703 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -247,6 +247,8 @@
 	/* MSI support */
 	bool msi_support;
 	bool using_msi;
+	/* interrupt clear before set */
+	bool int_clear;
 };
 
 struct mp_adapter {
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.c
rename to drivers/net/wireless/realtek/rtlwifi/ps.c
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/ps.h
rename to drivers/net/wireless/realtek/rtlwifi/ps.h
diff --git a/drivers/net/wireless/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/pwrseqcmd.h
rename to drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.c
rename to drivers/net/wireless/realtek/rtlwifi/rc.c
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rc.h
rename to drivers/net/wireless/realtek/rtlwifi/rc.h
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.c
rename to drivers/net/wireless/realtek/rtlwifi/regd.c
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/regd.h
rename to drivers/net/wireless/realtek/rtlwifi/regd.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 25db369..34ce064 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -1946,6 +1946,14 @@
 		rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
 		mac->rx_data_filter = *(u16 *)val;
 		break;
+	case HW_VAR_KEEP_ALIVE:{
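+			/* Two-byte H2C keep-alive payload: byte 0 looks like
+			 * an enable/control mask (0xff), byte 1 carries the
+			 * value handed in via HW_VAR_KEEP_ALIVE. */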
+			u8 array[2];
+			array[0] = 0xff;
+			array[1] = *((u8 *)val);
+			rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2,
+					    array);
+			break;
+		}
 	default:
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 			 "switch case not processed\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192de/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8192se/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723be/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/main.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/Makefile
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/def.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/dm.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
similarity index 99%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index b7f18e21..6e9418e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2253,11 +2253,28 @@
 	}
 }
 
+static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	u32 tmp = rtl_read_dword(rtlpriv, REG_HISR);
+
+	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+
+	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+
+	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+}
+
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
+	if (!rtlpci->int_clear)
+		rtl8821ae_clear_interrupt(hw); /* clear it here first */
+
 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
 	rtlpci->irq_enabled = true;
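rtl8821ae_clear_interrupt() above relies on the usual write-1-to-clear idiom: reading HISR/HISRE/HSISR snapshots the currently pending bits, and writing that value straight back acks exactly those bits, so an interrupt that fires between the read and the write is not lost. A minimal sketch of the pattern using the driver's own MMIO accessors (the helper name is hypothetical):

	/* Ack whatever is currently pending in a W1C status register. */
	static void w1c_ack(struct rtl_priv *rtlpriv, u32 reg)
	{
		u32 pending = rtl_read_dword(rtlpriv, reg);	/* snapshot */

		rtl_write_dword(rtlpriv, reg, pending);		/* 1s clear */
	}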
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/hw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/led.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/phy.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/pwrseq.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/rf.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
similarity index 97%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index a4988121..8ee141a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -96,6 +96,7 @@
 
 	rtl8821ae_bt_reg_init(hw);
 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+	rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
 	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
 
 	rtlpriv->dm.dm_initialgain_enable = 1;
@@ -167,6 +168,7 @@
 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+	rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
 	if (rtlpriv->cfg->mod_params->disable_watchdog)
 		pr_info("watchdog disabled\n");
 	rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -308,6 +310,7 @@
 	.swctrl_lps = false,
 	.fwctrl_lps = true,
 	.msi_support = true,
+	.int_clear = true,
 	.debug = DBG_EMERG,
 	.disable_watchdog = 0,
 };
@@ -437,6 +440,7 @@
 module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
 		   bool, 0444);
+module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
@@ -444,6 +448,7 @@
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
 MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
+MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 1)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
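The new knob is opt-out: the driver ships with int_clear = 1, which skips the clearing step, and users who want the clear-before-enable behaviour load the module with, for example:

	modprobe rtl8821ae int_clear=0
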
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/sw.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/table.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/rtl8821ae/trx.h
rename to drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.c
rename to drivers/net/wireless/realtek/rtlwifi/stats.c
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/stats.h
rename to drivers/net/wireless/realtek/rtlwifi/stats.h
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.c
rename to drivers/net/wireless/realtek/rtlwifi/usb.c
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
similarity index 100%
rename from drivers/net/wireless/rtlwifi/usb.h
rename to drivers/net/wireless/realtek/rtlwifi/usb.h
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
similarity index 99%
rename from drivers/net/wireless/rtlwifi/wifi.h
rename to drivers/net/wireless/realtek/rtlwifi/wifi.h
index b90ca61..4544752 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2249,6 +2249,9 @@
 
 	/* default 0: 1 means disable */
 	bool disable_watchdog;
+
+	/* default 1: 1 means do not clear interrupts before enabling them */
+	bool int_clear;
 };
 
 struct rtl_hal_usbint_cfg {
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 7e80432..b5bcc93 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -664,6 +664,7 @@
  * @tid: Traffic identifier.
  * @ssn: Pointer to ssn value.
  * @buf_size: Buffer size (for kernel version > 2.6.38).
+ * @amsdu: whether A-MSDU inside A-MPDU is allowed
  *
  * Return: status: 0 on success, negative error code on failure.
  */
@@ -673,7 +674,8 @@
 				     struct ieee80211_sta *sta,
 				     unsigned short tid,
 				     unsigned short *ssn,
-				     unsigned char buf_size)
+				     unsigned char buf_size,
+				     bool amsdu)
 {
 	int status = -EOPNOTSUPP;
 	struct rsi_hw *adapter = hw->priv;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9524564..9733b31 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7937,7 +7937,7 @@
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size)
+			u8 buf_size, bool amsdu)
 {
 	struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
 	int ret = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 1609b8a..440790b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -220,7 +220,7 @@
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			enum ieee80211_ampdu_mlme_action action,
 			struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			u8 buf_size);
+			u8 buf_size, bool amsdu);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
 		      struct survey_info *survey);
 void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 5932306..bf9afbf 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1114,6 +1114,7 @@
 	{ USB_DEVICE(0x0db0, 0x871c) },
 	{ USB_DEVICE(0x0db0, 0x899a) },
 	/* Ovislink */
+	{ USB_DEVICE(0x1b75, 0x3070) },
 	{ USB_DEVICE(0x1b75, 0x3071) },
 	{ USB_DEVICE(0x1b75, 0x3072) },
 	{ USB_DEVICE(0x1b75, 0xa200) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 48a2cad..7e8bb11 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -266,7 +266,7 @@
 		if (beacon_diff > beacon_int)
 			beacon_diff = 0;
 
-		autowake_timeout = (conf->max_sleep_period * beacon_int) - beacon_diff;
+		autowake_timeout = (conf->ps_dtim_period * beacon_int) - beacon_diff;
 		queue_delayed_work(rt2x00dev->workqueue,
 				   &rt2x00dev->autowakeup_work,
 				   autowake_timeout - 15);
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 7c355ff..ebed13a 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -350,7 +350,8 @@
 	cfg->bss_type = SCAN_BSS_TYPE_ANY;
 	/* currently NL80211 supports only a single interval */
 	for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
-		cfg->intervals[i] = cpu_to_le32(req->interval);
+		cfg->intervals[i] = cpu_to_le32(req->scan_plans[0].interval *
+						MSEC_PER_SEC);
 
 	cfg->ssid_len = 0;
 	ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index abbf054..50cce42 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -2115,3 +2115,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
 MODULE_FIRMWARE(WL18XX_FW_NAME);
+MODULE_FIRMWARE(WL18XX_CONF_FILE_NAME);
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index c938c49..bc15aa2 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -228,13 +228,15 @@
 	wl18xx_adjust_channels(cmd, cmd_channels);
 
 	if (c->num_short_intervals && c->long_interval &&
-	    c->long_interval > req->interval) {
-		cmd->short_cycles_msec = cpu_to_le16(req->interval);
+	    c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) {
+		cmd->short_cycles_msec =
+			cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
 		cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
 		cmd->short_cycles_count = c->num_short_intervals;
 	} else {
 		cmd->short_cycles_msec = 0;
-		cmd->long_cycles_msec = cpu_to_le16(req->interval);
+		cmd->long_cycles_msec =
+			cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
 		cmd->short_cycles_count = 0;
 	}
 	wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index e819369..ec7f6af 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5263,7 +5263,7 @@
 				  struct ieee80211_vif *vif,
 				  enum ieee80211_ampdu_mlme_action action,
 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-				  u8 buf_size)
+				  u8 buf_size, bool amsdu)
 {
 	struct wl1271 *wl = hw->priv;
 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index a1b6040..906be6a 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -318,7 +318,7 @@
 	bool watchdog_recovery;
 
 	/* Reg domain last configuration */
-	u32 reg_ch_conf_last[2];
+	u32 reg_ch_conf_last[2] __aligned(8);
 	/* Reg domain pending configuration */
 	u32 reg_ch_conf_pending[2];
 
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 929a6e7..56ebd82 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -788,6 +788,12 @@
 	/* Use the number of queues requested by the frontend */
 	be->vif->queues = vzalloc(requested_num_queues *
 				  sizeof(struct xenvif_queue));
+	if (!be->vif->queues) {
+		xenbus_dev_fatal(dev, -ENOMEM,
+				 "allocating queues");
+		return;
+	}
+
 	be->vif->num_queues = requested_num_queues;
 	be->vif->stalled_queues = requested_num_queues;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9bf63c2..441b158 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1706,19 +1706,19 @@
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-				unsigned int num_queues)
+				unsigned int *num_queues)
 {
 	unsigned int i;
 	int ret;
 
-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
 			       GFP_KERNEL);
 	if (!info->queues)
 		return -ENOMEM;
 
 	rtnl_lock();
 
-	for (i = 0; i < num_queues; i++) {
+	for (i = 0; i < *num_queues; i++) {
 		struct netfront_queue *queue = &info->queues[i];
 
 		queue->id = i;
@@ -1728,7 +1728,7 @@
 		if (ret < 0) {
 			dev_warn(&info->netdev->dev,
 				 "only created %d queues\n", i);
-			num_queues = i;
+			*num_queues = i;
 			break;
 		}
 
@@ -1738,11 +1738,11 @@
 			napi_enable(&queue->napi);
 	}
 
-	netif_set_real_num_tx_queues(info->netdev, num_queues);
+	netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
 	rtnl_unlock();
 
-	if (num_queues == 0) {
+	if (*num_queues == 0) {
 		dev_err(&info->netdev->dev, "no queues\n");
 		return -EINVAL;
 	}
@@ -1788,7 +1788,7 @@
 	if (info->queues)
 		xennet_destroy_queues(info);
 
-	err = xennet_create_queues(info, num_queues);
+	err = xennet_create_queues(info, &num_queues);
 	if (err < 0)
 		goto destroy_ring;
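xennet_create_queues() now takes num_queues as an in/out parameter so the caller learns how many queues survived a partial failure instead of keeping a stale count. A minimal sketch of the in/out-parameter pattern, with hypothetical names (struct foo, foo_init_one):

	/* Create up to *count items; shrink *count on partial failure so
	 * caller and callee agree on how many actually exist. */
	static int foo_create_some(struct foo *items, unsigned int *count)
	{
		unsigned int i;

		for (i = 0; i < *count; i++) {
			if (foo_init_one(&items[i]) < 0) {
				*count = i;	/* report the real number back */
				break;
			}
		}
		return *count ? 0 : -EINVAL;
	}
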
 
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 6639cd1..0d6003d 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -68,6 +68,7 @@
 
 	  If unsure, say N.
 
+source "drivers/nfc/fdp/Kconfig"
 source "drivers/nfc/pn544/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 source "drivers/nfc/nfcmrvl/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 2757fe1b..e362141 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,6 +2,7 @@
 # Makefile for nfc devices
 #
 
+obj-$(CONFIG_NFC_FDP)		+= fdp/
 obj-$(CONFIG_NFC_PN544)		+= pn544/
 obj-$(CONFIG_NFC_MICROREAD)	+= microread/
 obj-$(CONFIG_NFC_PN533)		+= pn533.o
diff --git a/drivers/nfc/fdp/Kconfig b/drivers/nfc/fdp/Kconfig
new file mode 100644
index 0000000..fbccd9d
--- /dev/null
+++ b/drivers/nfc/fdp/Kconfig
@@ -0,0 +1,23 @@
+config NFC_FDP
+	tristate "Intel FDP NFC driver"
+	depends on NFC_NCI
+	select CRC_CCITT
+	default n
+	---help---
+	  Intel Fields Peak NFC controller core driver.
+	  This is a driver based on the NCI NFC kernel layers.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called fdp.
+	  Say N if unsure.
+
+config NFC_FDP_I2C
+	tristate "NFC FDP i2c support"
+	depends on NFC_FDP && I2C
+	---help---
+	  This module adds support for the Intel Fields Peak NFC controller
+	  i2c interface.
+	  Select this if your platform is using the i2c bus.
+
+	  If you choose to build a module, it'll be called fdp_i2c.
+	  Say N if unsure.
diff --git a/drivers/nfc/fdp/Makefile b/drivers/nfc/fdp/Makefile
new file mode 100644
index 0000000..e79d51b
--- /dev/null
+++ b/drivers/nfc/fdp/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for FDP NCI based NFC driver
+#
+
+obj-$(CONFIG_NFC_FDP)     += fdp.o
+obj-$(CONFIG_NFC_FDP_I2C) += fdp_i2c.o
+
+fdp_i2c-objs  = i2c.o
+
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
new file mode 100644
index 0000000..ccb07a1
--- /dev/null
+++ b/drivers/nfc/fdp/fdp.c
@@ -0,0 +1,817 @@
+/* -------------------------------------------------------------------------
+ * Copyright (C) 2014-2016, Intel Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ * -------------------------------------------------------------------------
+ */
+
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <net/nfc/nci_core.h>
+
+#include "fdp.h"
+
+#define FDP_OTP_PATCH_NAME			"otp.bin"
+#define FDP_RAM_PATCH_NAME			"ram.bin"
+#define FDP_FW_HEADER_SIZE			576
+#define FDP_FW_UPDATE_SLEEP			1000
+
+#define NCI_GET_VERSION_TIMEOUT			8000
+#define NCI_PATCH_REQUEST_TIMEOUT		8000
+#define FDP_PATCH_CONN_DEST			0xC2
+#define FDP_PATCH_CONN_PARAM_TYPE		0xA0
+
+#define NCI_PATCH_TYPE_RAM			0x00
+#define NCI_PATCH_TYPE_OTP			0x01
+#define NCI_PATCH_TYPE_EOT			0xFF
+
+#define NCI_PARAM_ID_FW_RAM_VERSION		0xA0
+#define NCI_PARAM_ID_FW_OTP_VERSION		0xA1
+#define NCI_PARAM_ID_OTP_LIMITED_VERSION	0xC5
+#define NCI_PARAM_ID_KEY_INDEX_ID		0xC6
+
+#define NCI_GID_PROP				0x0F
+#define NCI_OP_PROP_PATCH_OID			0x08
+#define NCI_OP_PROP_SET_PDATA_OID		0x23
+
+struct fdp_nci_info {
+	struct nfc_phy_ops *phy_ops;
+	struct fdp_i2c_phy *phy;
+	struct nci_dev *ndev;
+
+	const struct firmware *otp_patch;
+	const struct firmware *ram_patch;
+	u32 otp_patch_version;
+	u32 ram_patch_version;
+
+	u32 otp_version;
+	u32 ram_version;
+	u32 limited_otp_version;
+	u8 key_index;
+
+	u8 *fw_vsc_cfg;
+	u8 clock_type;
+	u32 clock_freq;
+
+	atomic_t data_pkt_counter;
+	void (*data_pkt_counter_cb)(struct nci_dev *ndev);
+	u8 setup_patch_sent;
+	u8 setup_patch_ntf;
+	u8 setup_patch_status;
+	u8 setup_reset_ntf;
+	wait_queue_head_t setup_wq;
+};
+
+static u8 nci_core_get_config_otp_ram_version[5] = {
+	0x04,
+	NCI_PARAM_ID_FW_RAM_VERSION,
+	NCI_PARAM_ID_FW_OTP_VERSION,
+	NCI_PARAM_ID_OTP_LIMITED_VERSION,
+	NCI_PARAM_ID_KEY_INDEX_ID
+};
+
+struct nci_core_get_config_rsp {
+	u8 status;
+	u8 count;
+	u8 data[0];
+};
+
+static int fdp_nci_create_conn(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct core_conn_create_dest_spec_params param;
+	int r;
+
+	/* proprietary destination specific parameter without value */
+	param.type = FDP_PATCH_CONN_PARAM_TYPE;
+	param.length = 0x00;
+
+	r = nci_core_conn_create(info->ndev, FDP_PATCH_CONN_DEST, 1,
+				 sizeof(param), &param);
+	if (r)
+		return r;
+
+	return nci_get_conn_info_by_id(ndev, 0);
+}
+
+static inline int fdp_nci_get_versions(struct nci_dev *ndev)
+{
+	return nci_core_cmd(ndev, NCI_OP_CORE_GET_CONFIG_CMD,
+			    sizeof(nci_core_get_config_otp_ram_version),
+			    (__u8 *) &nci_core_get_config_otp_ram_version);
+}
+
+static inline int fdp_nci_patch_cmd(struct nci_dev *ndev, u8 type)
+{
+	return nci_prop_cmd(ndev, NCI_OP_PROP_PATCH_OID, sizeof(type), &type);
+}
+
+static inline int fdp_nci_set_production_data(struct nci_dev *ndev, u8 len,
+					      char *data)
+{
+	return nci_prop_cmd(ndev, NCI_OP_PROP_SET_PDATA_OID, len, data);
+}
+
+static int fdp_nci_set_clock(struct nci_dev *ndev, u8 clock_type,
+			     u32 clock_freq)
+{
+	u32 fc = 13560;
+	u32 nd, num, delta;
+	char data[9];
+
+	nd = (24 * fc) / clock_freq;
+	delta = 24 * fc - nd * clock_freq;
+	num = (32768 * delta) / clock_freq;
+
+	data[0] = 0x00;
+	data[1] = 0x00;
+	data[2] = 0x00;
+
+	data[3] = 0x10;
+	data[4] = 0x04;
+	data[5] = num & 0xFF;
+	data[6] = (num >> 8) & 0xff;
+	data[7] = nd;
+	data[8] = clock_type;
+
+	return fdp_nci_set_production_data(ndev, 9, data);
+}
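+
+/*
+ * Worked example (assuming the 26 MHz default that the i2c phy falls
+ * back to, i.e. clock_freq = 26000): 24 * 13560 = 325440, so
+ * nd = 325440 / 26000 = 12, delta = 325440 - 12 * 26000 = 13440 and
+ * num = (32768 * 13440) / 26000 = 16938 = 0x422a; the payload then
+ * ends with 0x2a, 0x42, 0x0c, <clock_type>.
+ */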
+
+static void fdp_nci_send_patch_cb(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+
+	info->setup_patch_sent = 1;
+	wake_up(&info->setup_wq);
+}
+
+/*
+ * Register a packet sent counter and a callback
+ *
+ * We have no other way of knowing when all firmware packets were sent out
+ * on the i2c bus. We need to know that in order to close the connection and
+ * send the patch end message.
+ */
+static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev,
+				  void (*cb)(struct nci_dev *ndev), int count)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "NCI data pkt counter %d\n", count);
+	atomic_set(&info->data_pkt_counter, count);
+	info->data_pkt_counter_cb = cb;
+}
+
+/*
+ * The device is expecting a stream of packets. All packets need to
+ * have the PBF flag set to 0x0 (last packet) even if the firmware
+ * file is segmented and there are multiple packets. If we give the
+ * whole firmware to nci_send_data it will segment it and it will set
+ * the PBF flag to 0x01 so we need to do the segmentation here.
+ *
+ * The firmware will be analyzed and applied when we send NCI_OP_PROP_PATCH_CMD
+ * command with NCI_PATCH_TYPE_EOT parameter. The device will send a
+ * NFCC_PATCH_NTF packet and an NCI_OP_CORE_RESET_NTF packet.
+ */
+static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	const struct firmware *fw;
+	struct sk_buff *skb;
+	unsigned long len;
+	int max_size;
+	u8 payload_size;
+	int rc = 0;
+
+	if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
+	    (type == NCI_PATCH_TYPE_RAM && !info->ram_patch))
+		return -EINVAL;
+
+	if (type == NCI_PATCH_TYPE_OTP)
+		fw = info->otp_patch;
+	else
+		fw = info->ram_patch;
+
+	max_size = nci_conn_max_data_pkt_payload_size(ndev, conn_id);
+	if (max_size <= 0)
+		return -EINVAL;
+
+	len = fw->size;
+
+	fdp_nci_set_data_pkt_counter(ndev, fdp_nci_send_patch_cb,
+				     DIV_ROUND_UP(fw->size, max_size));
+
+	while (len) {
+
+		payload_size = min_t(unsigned long, (unsigned long) max_size,
+				     len);
+
+		skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
+				    GFP_KERNEL);
+		if (!skb) {
+			fdp_nci_set_data_pkt_counter(ndev, NULL, 0);
+			return -ENOMEM;
+		}
+
+
+		skb_reserve(skb, NCI_CTRL_HDR_SIZE);
+
+		memcpy(skb_put(skb, payload_size), fw->data + (fw->size - len),
+		       payload_size);
+
+		rc = nci_send_data(ndev, conn_id, skb);
+
+		if (rc) {
+			fdp_nci_set_data_pkt_counter(ndev, NULL, 0);
+			return rc;
+		}
+
+		len -= payload_size;
+	}
+
+	return rc;
+}
+
+static int fdp_nci_open(struct nci_dev *ndev)
+{
+	int r;
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	r = info->phy_ops->enable(info->phy);
+
+	return r;
+}
+
+static int fdp_nci_close(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+	return 0;
+}
+
+static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
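+	/*
+	 * Count down the packets queued by fdp_nci_send_patch(); once the
+	 * last one has been handed to the phy, the callback wakes up the
+	 * setup code so it can close the connection and send the patch EOT.
+	 */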
+	if (atomic_dec_and_test(&info->data_pkt_counter))
+		info->data_pkt_counter_cb(ndev);
+
+	return info->phy_ops->write(info->phy, skb);
+}
+
+int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+	return nci_recv_frame(ndev, skb);
+}
+EXPORT_SYMBOL(fdp_nci_recv_frame);
+
+static int fdp_nci_request_firmware(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	u8 *data;
+	int r;
+
+	r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev);
+	if (r < 0) {
+		nfc_err(dev, "RAM patch request error\n");
+		goto error;
+	}
+
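+	/* the 32 bit patch version is stored little endian after the header */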
+	data = (u8 *) info->ram_patch->data;
+	info->ram_patch_version =
+		data[FDP_FW_HEADER_SIZE] |
+		(data[FDP_FW_HEADER_SIZE + 1] << 8) |
+		(data[FDP_FW_HEADER_SIZE + 2] << 16) |
+		(data[FDP_FW_HEADER_SIZE + 3] << 24);
+
+	dev_dbg(dev, "RAM patch version: %d, size: %d\n",
+		  info->ram_patch_version, (int) info->ram_patch->size);
+
+
+	r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev);
+	if (r < 0) {
+		nfc_err(dev, "OTP patch request error\n");
+		goto out;
+	}
+
+	data = (u8 *) info->otp_patch->data;
+	info->otp_patch_version =
+		data[FDP_FW_HEADER_SIZE] |
+		(data[FDP_FW_HEADER_SIZE + 1] << 8) |
+		(data[FDP_FW_HEADER_SIZE+2] << 16) |
+		(data[FDP_FW_HEADER_SIZE+3] << 24);
+
+	dev_dbg(dev, "OTP patch version: %d, size: %d\n",
+		 info->otp_patch_version, (int) info->otp_patch->size);
+out:
+	return 0;
+error:
+	return r;
+}
+
+static void fdp_nci_release_firmware(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+
+	if (info->otp_patch) {
+		release_firmware(info->otp_patch);
+		info->otp_patch = NULL;
+	}
+
+	if (info->ram_patch) {
+		release_firmware(info->ram_patch);
+		info->ram_patch = NULL;
+	}
+}
+
+static int fdp_nci_patch_otp(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	int conn_id;
+	int r = 0;
+
+	if (info->otp_version >= info->otp_patch_version)
+		goto out;
+
+	info->setup_patch_sent = 0;
+	info->setup_reset_ntf = 0;
+	info->setup_patch_ntf = 0;
+
+	/* Patch init request */
+	r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_OTP);
+	if (r)
+		goto out;
+
+	/* Patch data connection creation */
+	conn_id = fdp_nci_create_conn(ndev);
+	if (conn_id < 0) {
+		r = conn_id;
+		goto out;
+	}
+
+	/* Send the patch over the data connection */
+	r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_OTP);
+	if (r)
+		goto out;
+
+	/* Wait for all the packets to be sent over i2c */
+	wait_event_interruptible(info->setup_wq,
+				 info->setup_patch_sent == 1);
+
+	/* make sure that the NFCC processed the last data packet */
+	msleep(FDP_FW_UPDATE_SLEEP);
+
+	/* Close the data connection */
+	r = nci_core_conn_close(info->ndev, conn_id);
+	if (r)
+		goto out;
+
+	/* Patch finish message */
+	r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT);
+	if (r) {
+		nfc_err(dev, "OTP patch EOT error %d\n", r);
+		goto out;
+	}
+
+	/* If the patch notification didn't arrive yet, wait for it */
+	wait_event_interruptible(info->setup_wq, info->setup_patch_ntf);
+
+	/* Check if the patching was successful */
+	r = info->setup_patch_status;
+	if (r) {
+		nfc_err(dev, "OTP patch error 0x%x\n", r);
+		r = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * We need to wait for the reset notification before we
+	 * can continue
+	 */
+	wait_event_interruptible(info->setup_wq, info->setup_reset_ntf);
+
+out:
+	return r;
+}
+
+static int fdp_nci_patch_ram(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	int conn_id;
+	int r = 0;
+
+	if (info->ram_version >= info->ram_patch_version)
+		goto out;
+
+	info->setup_patch_sent = 0;
+	info->setup_reset_ntf = 0;
+	info->setup_patch_ntf = 0;
+
+	/* Patch init request */
+	r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_RAM);
+	if (r)
+		goto out;
+
+	/* Patch data connection creation */
+	conn_id = fdp_nci_create_conn(ndev);
+	if (conn_id < 0) {
+		r = conn_id;
+		goto out;
+	}
+
+	/* Send the patch over the data connection */
+	r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_RAM);
+	if (r)
+		goto out;
+
+	/* Wait for all the packets to be sent over i2c */
+	wait_event_interruptible(info->setup_wq,
+				 info->setup_patch_sent == 1);
+
+	/* make sure that the NFCC processed the last data packet */
+	msleep(FDP_FW_UPDATE_SLEEP);
+
+	/* Close the data connection */
+	r = nci_core_conn_close(info->ndev, conn_id);
+	if (r)
+		goto out;
+
+	/* Patch finish message */
+	r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT);
+	if (r) {
+		nfc_err(dev, "RAM patch EOT error %d\n", r);
+		goto out;
+	}
+
+	/* If the patch notification didn't arrive yet, wait for it */
+	wait_event_interruptible(info->setup_wq, info->setup_patch_ntf);
+
+	/* Check if the patching was successful */
+	r = info->setup_patch_status;
+	if (r) {
+		nfc_err(dev, "RAM patch error 0x%x\n", r);
+		r = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * We need to wait for the reset notification before we
+	 * can continue
+	 */
+	wait_event_interruptible(info->setup_wq, info->setup_reset_ntf);
+
+out:
+	return r;
+}
+
+static int fdp_nci_setup(struct nci_dev *ndev)
+{
+	/* Format: total length followed by an NCI packet */
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	int r;
+	u8 patched = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	r = nci_core_init(ndev);
+	if (r)
+		goto error;
+
+	/* Get RAM and OTP version */
+	r = fdp_nci_get_versions(ndev);
+	if (r)
+		goto error;
+
+	/* Load firmware from disk */
+	r = fdp_nci_request_firmware(ndev);
+	if (r)
+		goto error;
+
+	/* Update OTP */
+	if (info->otp_version < info->otp_patch_version) {
+		r = fdp_nci_patch_otp(ndev);
+		if (r)
+			goto error;
+		patched = 1;
+	}
+
+	/* Update RAM */
+	if (info->ram_version < info->ram_patch_version) {
+		r = fdp_nci_patch_ram(ndev);
+		if (r)
+			goto error;
+		patched = 1;
+	}
+
+	/* Release the firmware buffers */
+	fdp_nci_release_firmware(ndev);
+
+	/* If a patch was applied, verify the new version */
+	if (patched) {
+		r = nci_core_init(ndev);
+		if (r)
+			goto error;
+
+		r = fdp_nci_get_versions(ndev);
+		if (r)
+			goto error;
+
+		if (info->otp_version != info->otp_patch_version ||
+		    info->ram_version != info->ram_patch_version) {
+			nfc_err(dev, "Firmware update failed");
+			r = -EINVAL;
+			goto error;
+		}
+	}
+
+	/*
+	 * We initialized the device, but the NFC subsystem expects
+	 * it to be uninitialized, so reset it before handing it back.
+	 */
+	return nci_core_reset(ndev);
+
+error:
+	fdp_nci_release_firmware(ndev);
+	nfc_err(dev, "Setup error %d\n", r);
+	return r;
+}
+
+static int fdp_nci_post_setup(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	int r;
+
+	/* Check if the device has VSC */
+	if (info->fw_vsc_cfg && info->fw_vsc_cfg[0]) {
+
+		/* Set the vendor specific configuration */
+		r = fdp_nci_set_production_data(ndev, info->fw_vsc_cfg[3],
+						&info->fw_vsc_cfg[4]);
+		if (r) {
+			nfc_err(dev, "Vendor specific config set error %d\n",
+				r);
+			return r;
+		}
+	}
+
+	/* Set clock type and frequency */
+	r = fdp_nci_set_clock(ndev, info->clock_type, info->clock_freq);
+	if (r) {
+		nfc_err(dev, "Clock set error %d\n", r);
+		return r;
+	}
+
+	/* In order to apply the VSC, the FDP needs a reset */
+	r = nci_core_reset(ndev);
+	if (r)
+		return r;
+
+	/*
+	 * The NCI core was initialized when post_setup was called,
+	 * so leave it in that state.
+	 */
+	return nci_core_init(ndev);
+}
+
+static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev,
+					  struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+	info->setup_reset_ntf = 1;
+	wake_up(&info->setup_wq);
+
+	return 0;
+}
+
+static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev,
+					  struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+	info->setup_patch_ntf = 1;
+	info->setup_patch_status = skb->data[0];
+	wake_up(&info->setup_wq);
+
+	return 0;
+}
+
+static int fdp_nci_prop_patch_rsp_packet(struct nci_dev *ndev,
+					  struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	u8 status = skb->data[0];
+
+	dev_dbg(dev, "%s: status 0x%x\n", __func__, status);
+	nci_req_complete(ndev, status);
+
+	return 0;
+}
+
+static int fdp_nci_prop_set_production_data_rsp_packet(struct nci_dev *ndev,
+							struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	u8 status = skb->data[0];
+
+	dev_dbg(dev, "%s: status 0x%x\n", __func__, status);
+	nci_req_complete(ndev, status);
+
+	return 0;
+}
+
+static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev,
+						struct sk_buff *skb)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+	struct nci_core_get_config_rsp *rsp = (void *) skb->data;
+	u8 i, *p;
+
+	if (rsp->status == NCI_STATUS_OK) {
+
+		p = rsp->data;
+		for (i = 0; i < 4; i++) {
+
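+			/* entries are TLV coded: id byte, length byte, value */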
+			switch (*p++) {
+			case NCI_PARAM_ID_FW_RAM_VERSION:
+				p++;
+				info->ram_version = le32_to_cpup((__le32 *) p);
+				p += 4;
+				break;
+			case NCI_PARAM_ID_FW_OTP_VERSION:
+				p++;
+				info->otp_version = le32_to_cpup((__le32 *) p);
+				p += 4;
+				break;
+			case NCI_PARAM_ID_OTP_LIMITED_VERSION:
+				p++;
+				info->limited_otp_version =
+					le32_to_cpup((__le32 *) p);
+				p += 4;
+				break;
+			case NCI_PARAM_ID_KEY_INDEX_ID:
+				p++;
+				info->key_index = *p++;
+			}
+		}
+	}
+
+	dev_dbg(dev, "OTP version %d\n", info->otp_version);
+	dev_dbg(dev, "RAM version %d\n", info->ram_version);
+	dev_dbg(dev, "key index %d\n", info->key_index);
+	dev_dbg(dev, "%s: status 0x%x\n", __func__, rsp->status);
+
+	nci_req_complete(ndev, rsp->status);
+
+	return 0;
+}
+
+static struct nci_driver_ops fdp_core_ops[] = {
+	{
+		.opcode = NCI_OP_CORE_GET_CONFIG_RSP,
+		.rsp = fdp_nci_core_get_config_rsp_packet,
+	},
+	{
+		.opcode = NCI_OP_CORE_RESET_NTF,
+		.ntf = fdp_nci_core_reset_ntf_packet,
+	},
+};
+
+static struct nci_driver_ops fdp_prop_ops[] = {
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROP, NCI_OP_PROP_PATCH_OID),
+		.rsp = fdp_nci_prop_patch_rsp_packet,
+		.ntf = fdp_nci_prop_patch_ntf_packet,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROP,
+					  NCI_OP_PROP_SET_PDATA_OID),
+		.rsp = fdp_nci_prop_set_production_data_rsp_packet,
+	},
+};
+
+static struct nci_ops nci_ops = {
+	.open = fdp_nci_open,
+	.close = fdp_nci_close,
+	.send = fdp_nci_send,
+	.setup = fdp_nci_setup,
+	.post_setup = fdp_nci_post_setup,
+	.prop_ops = fdp_prop_ops,
+	.n_prop_ops = ARRAY_SIZE(fdp_prop_ops),
+	.core_ops = fdp_core_ops,
+	.n_core_ops = ARRAY_SIZE(fdp_core_ops),
+};
+
+int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+			struct nci_dev **ndevp, int tx_headroom,
+			int tx_tailroom, u8 clock_type, u32 clock_freq,
+			u8 *fw_vsc_cfg)
+{
+	struct device *dev = &phy->i2c_dev->dev;
+	struct fdp_nci_info *info;
+	struct nci_dev *ndev;
+	u32 protocols;
+	int r;
+
+	info = kzalloc(sizeof(struct fdp_nci_info), GFP_KERNEL);
+	if (!info) {
+		r = -ENOMEM;
+		goto err_info_alloc;
+	}
+
+	info->phy = phy;
+	info->phy_ops = phy_ops;
+	info->clock_type = clock_type;
+	info->clock_freq = clock_freq;
+	info->fw_vsc_cfg = fw_vsc_cfg;
+
+	init_waitqueue_head(&info->setup_wq);
+
+	protocols = NFC_PROTO_JEWEL_MASK |
+		    NFC_PROTO_MIFARE_MASK |
+		    NFC_PROTO_FELICA_MASK |
+		    NFC_PROTO_ISO14443_MASK |
+		    NFC_PROTO_ISO14443_B_MASK |
+		    NFC_PROTO_NFC_DEP_MASK |
+		    NFC_PROTO_ISO15693_MASK;
+
+	ndev = nci_allocate_device(&nci_ops, protocols, tx_headroom,
+				   tx_tailroom);
+	if (!ndev) {
+		nfc_err(dev, "Cannot allocate nfc ndev\n");
+		r = -ENOMEM;
+		goto err_alloc_ndev;
+	}
+
+	r = nci_register_device(ndev);
+	if (r)
+		goto err_regdev;
+
+	*ndevp = ndev;
+	info->ndev = ndev;
+
+	nci_set_drvdata(ndev, info);
+
+	return 0;
+
+err_regdev:
+	nci_free_device(ndev);
+err_alloc_ndev:
+	kfree(info);
+err_info_alloc:
+	return r;
+}
+EXPORT_SYMBOL(fdp_nci_probe);
+
+void fdp_nci_remove(struct nci_dev *ndev)
+{
+	struct fdp_nci_info *info = nci_get_drvdata(ndev);
+	struct device *dev = &info->phy->i2c_dev->dev;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	nci_unregister_device(ndev);
+	nci_free_device(ndev);
+	kfree(info);
+}
+EXPORT_SYMBOL(fdp_nci_remove);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFC NCI driver for Intel Fields Peak NFC controller");
+MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>");
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
new file mode 100644
index 0000000..0bd36c0
--- /dev/null
+++ b/drivers/nfc/fdp/fdp.h
@@ -0,0 +1,38 @@
+/* -------------------------------------------------------------------------
+ * Copyright (C) 2014-2016, Intel Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ * -------------------------------------------------------------------------
+ */
+
+#ifndef __LOCAL_FDP_H_
+#define __LOCAL_FDP_H_
+
+#include <net/nfc/nci_core.h>
+#include <linux/gpio/consumer.h>
+
+struct fdp_i2c_phy {
+	struct i2c_client *i2c_dev;
+	struct gpio_desc *power_gpio;
+	struct nci_dev *ndev;
+
+	/* < 0 if i2c error occurred */
+	int hard_fault;
+	uint16_t next_read_size;
+};
+
+int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
+		  struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
+		  u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
+void fdp_nci_remove(struct nci_dev *ndev);
+int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+
+#endif /* __LOCAL_FDP_H_ */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
new file mode 100644
index 0000000..532db28
--- /dev/null
+++ b/drivers/nfc/fdp/i2c.c
@@ -0,0 +1,388 @@
+/* -------------------------------------------------------------------------
+ * Copyright (C) 2014-2016, Intel Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ * -------------------------------------------------------------------------
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/nfc.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <net/nfc/nfc.h>
+#include <net/nfc/nci_core.h>
+
+#include "fdp.h"
+
+#define FDP_I2C_DRIVER_NAME	"fdp_nci_i2c"
+
+#define FDP_DP_POWER_GPIO_NAME	"power"
+#define FDP_DP_CLOCK_TYPE_NAME	"clock-type"
+#define FDP_DP_CLOCK_FREQ_NAME	"clock-freq"
+#define FDP_DP_FW_VSC_CFG_NAME	"fw-vsc-cfg"
+
+#define FDP_FRAME_HEADROOM	2
+#define FDP_FRAME_TAILROOM	1
+
+#define FDP_NCI_I2C_MIN_PAYLOAD	5
+#define FDP_NCI_I2C_MAX_PAYLOAD	261
+
+#define FDP_POWER_OFF		0
+#define FDP_POWER_ON		1
+
+#define fdp_nci_i2c_dump_skb(dev, prefix, skb)				\
+	print_hex_dump(KERN_DEBUG, prefix": ", DUMP_PREFIX_OFFSET,	\
+		       16, 1, (skb)->data, (skb)->len, 0)
+
+static void fdp_nci_i2c_reset(struct fdp_i2c_phy *phy)
+{
+	/* Reset RST/WakeUP for at least 100 microseconds */
+	gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_OFF);
+	usleep_range(1000, 4000);
+	gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_ON);
+	usleep_range(10000, 14000);
+}
+
+static int fdp_nci_i2c_enable(void *phy_id)
+{
+	struct fdp_i2c_phy *phy = phy_id;
+
+	dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
+	fdp_nci_i2c_reset(phy);
+
+	return 0;
+}
+
+static void fdp_nci_i2c_disable(void *phy_id)
+{
+	struct fdp_i2c_phy *phy = phy_id;
+
+	dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
+	fdp_nci_i2c_reset(phy);
+}
+
+static void fdp_nci_i2c_add_len_lrc(struct sk_buff *skb)
+{
+	u8 lrc = 0;
+	u16 len, i;
+
+	/* Add length header */
+	len = skb->len;
+	*skb_push(skb, 1) = len & 0xff;
+	*skb_push(skb, 1) = len >> 8;
+
+	/* Compute and add lrc */
+	for (i = 0; i < len + 2; i++)
+		lrc ^= skb->data[i];
+
+	*skb_put(skb, 1) = lrc;
+}
+
+static void fdp_nci_i2c_remove_len_lrc(struct sk_buff *skb)
+{
+	skb_pull(skb, FDP_FRAME_HEADROOM);
+	skb_trim(skb, skb->len - FDP_FRAME_TAILROOM);
+}
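+
+/*
+ * Frame layout example (using the standard NCI CORE_RESET_CMD
+ * 20 00 01 00 purely for illustration): the phy prepends the big
+ * endian payload length 00 04 and appends the XOR LRC over all six
+ * bytes, 00 ^ 04 ^ 20 ^ 00 ^ 01 ^ 00 = 0x25, giving the wire frame
+ * 00 04 20 00 01 00 25.
+ */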
+
+static int fdp_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+	struct fdp_i2c_phy *phy = phy_id;
+	struct i2c_client *client = phy->i2c_dev;
+	int r;
+
+	if (phy->hard_fault != 0)
+		return phy->hard_fault;
+
+	fdp_nci_i2c_add_len_lrc(skb);
+	fdp_nci_i2c_dump_skb(&client->dev, "fdp_wr", skb);
+
+	r = i2c_master_send(client, skb->data, skb->len);
+	if (r == -EREMOTEIO) {  /* Retry, chip was in standby */
+		usleep_range(1000, 4000);
+		r = i2c_master_send(client, skb->data, skb->len);
+	}
+
+	if (r < 0 || r != skb->len)
+		dev_dbg(&client->dev, "%s: error err=%d len=%d\n",
+			__func__, r, skb->len);
+
+	if (r >= 0) {
+		if (r != skb->len) {
+			phy->hard_fault = r;
+			r = -EREMOTEIO;
+		} else {
+			r = 0;
+		}
+	}
+
+	fdp_nci_i2c_remove_len_lrc(skb);
+
+	return r;
+}
+
+static struct nfc_phy_ops i2c_phy_ops = {
+	.write = fdp_nci_i2c_write,
+	.enable = fdp_nci_i2c_enable,
+	.disable = fdp_nci_i2c_disable,
+};
+
+static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb)
+{
+	int r, len;
+	u8 tmp[FDP_NCI_I2C_MAX_PAYLOAD], lrc, k;
+	u16 i;
+	struct i2c_client *client = phy->i2c_dev;
+
+	*skb = NULL;
+
+	/* Read the length packet and the data packet */
+	for (k = 0; k < 2; k++) {
+
+		len = phy->next_read_size;
+
+		r = i2c_master_recv(client, tmp, len);
+		if (r != len) {
+			dev_dbg(&client->dev, "%s: i2c recv err: %d\n",
+				__func__, r);
+			goto flush;
+		}
+
+		/* Check packet integrity */
+		for (lrc = i = 0; i < r; i++)
+			lrc ^= tmp[i];
+
+		/*
+		 * LRC check failed. This may be due to a transmission error
+		 * or to desynchronization between the driver and the FDP.
+		 * Drop the packet and force resynchronization.
+		 */
+		if (lrc) {
+			dev_dbg(&client->dev, "%s: corrupted packet\n",
+				__func__);
+			phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD;
+			goto flush;
+		}
+
+		/* Packet that contains a length */
+		if (tmp[0] == 0 && tmp[1] == 0) {
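+			/*
+			 * tmp[2..3] hold the big endian payload length of
+			 * the next packet; add 3 for the 2 byte length
+			 * header and the 1 byte LRC.
+			 */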
+			phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3;
+		} else {
+			phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD;
+
+			*skb = alloc_skb(len, GFP_KERNEL);
+			if (*skb == NULL) {
+				r = -ENOMEM;
+				goto flush;
+			}
+
+			memcpy(skb_put(*skb, len), tmp, len);
+			fdp_nci_i2c_dump_skb(&client->dev, "fdp_rd", *skb);
+
+			fdp_nci_i2c_remove_len_lrc(*skb);
+		}
+	}
+
+	return 0;
+
+flush:
+	/* Flush the remaining data */
+	if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
+		r = -EREMOTEIO;
+
+	return r;
+}
+
+static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+	struct fdp_i2c_phy *phy = phy_id;
+	struct i2c_client *client;
+	struct sk_buff *skb;
+	int r;
+
+	if (!phy || irq != phy->i2c_dev->irq) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	client = phy->i2c_dev;
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	r = fdp_nci_i2c_read(phy, &skb);
+
+	if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
+		return IRQ_HANDLED;
+
+	if (skb != NULL)
+		fdp_nci_recv_frame(phy->ndev, skb);
+
+	return IRQ_HANDLED;
+}
+
+static void fdp_nci_i2c_read_device_properties(struct device *dev,
+					       u8 *clock_type, u32 *clock_freq,
+					       u8 **fw_vsc_cfg)
+{
+	int r;
+	u8 len;
+
+	r = device_property_read_u8(dev, FDP_DP_CLOCK_TYPE_NAME, clock_type);
+	if (r) {
+		dev_dbg(dev, "Using default clock type\n");
+		*clock_type = 0;
+	}
+
+	r = device_property_read_u32(dev, FDP_DP_CLOCK_FREQ_NAME, clock_freq);
+	if (r) {
+		dev_dbg(dev, "Using default clock frequency\n");
+		*clock_freq = 26000;
+	}
+
+	if (device_property_present(dev, FDP_DP_FW_VSC_CFG_NAME)) {
+		r = device_property_read_u8(dev, FDP_DP_FW_VSC_CFG_NAME,
+					    &len);
+
+		if (r || !len)
+			goto vsc_read_err;
+
+		/* Add 1 to the length to include the length byte itself */
+		len++;
+
+		*fw_vsc_cfg = devm_kmalloc(dev,
+					   len * sizeof(**fw_vsc_cfg),
+					   GFP_KERNEL);
+		if (!*fw_vsc_cfg)
+			goto vsc_read_err;
+
+		r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME,
+						  *fw_vsc_cfg, len);
+
+		if (r) {
+			devm_kfree(dev, *fw_vsc_cfg);
+			goto vsc_read_err;
+		}
+	} else {
+vsc_read_err:
+		dev_dbg(dev, "FW vendor specific commands not present\n");
+		*fw_vsc_cfg = NULL;
+	}
+
+	dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s\n",
+		*clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no");
+}
+
+static int fdp_nci_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct fdp_i2c_phy *phy;
+	struct device *dev = &client->dev;
+	u8 *fw_vsc_cfg;
+	u8 clock_type;
+	u32 clock_freq;
+	int r = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		nfc_err(dev, "No I2C_FUNC_I2C support\n");
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(dev, sizeof(struct fdp_i2c_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->i2c_dev = client;
+	phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD;
+	i2c_set_clientdata(client, phy);
+
+	/* Checking if we have an irq */
+	if (client->irq <= 0) {
+		dev_err(dev, "IRQ not present\n");
+		return -ENODEV;
+	}
+
+	r = devm_request_threaded_irq(dev, client->irq, NULL,
+				      fdp_nci_i2c_irq_thread_fn,
+				      IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				      FDP_I2C_DRIVER_NAME, phy);
+
+	if (r < 0) {
+		nfc_err(&client->dev, "Unable to register IRQ handler\n");
+		return r;
+	}
+
+	/* Requesting the power gpio */
+	phy->power_gpio = devm_gpiod_get(dev, FDP_DP_POWER_GPIO_NAME,
+					 GPIOD_OUT_LOW);
+
+	if (IS_ERR(phy->power_gpio)) {
+		nfc_err(dev, "Power GPIO request failed\n");
+		return PTR_ERR(phy->power_gpio);
+	}
+
+	/* read device properties to get the clock and production settings */
+	fdp_nci_i2c_read_device_properties(dev, &clock_type, &clock_freq,
+					   &fw_vsc_cfg);
+
+	/* Call the NFC specific probe function */
+	r = fdp_nci_probe(phy, &i2c_phy_ops, &phy->ndev,
+			  FDP_FRAME_HEADROOM, FDP_FRAME_TAILROOM,
+			  clock_type, clock_freq, fw_vsc_cfg);
+	if (r < 0) {
+		nfc_err(dev, "NCI probing error\n");
+		return r;
+	}
+
+	dev_dbg(dev, "I2C driver loaded\n");
+	return 0;
+}
+
+static int fdp_nci_i2c_remove(struct i2c_client *client)
+{
+	struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	fdp_nci_remove(phy->ndev);
+	fdp_nci_i2c_disable(phy);
+
+	return 0;
+}
+
+static struct i2c_device_id fdp_nci_i2c_id_table[] = {
+	{"int339a", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, fdp_nci_i2c_id_table);
+
+static const struct acpi_device_id fdp_nci_i2c_acpi_match[] = {
+	{"INT339A", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(acpi, fdp_nci_i2c_acpi_match);
+
+static struct i2c_driver fdp_nci_i2c_driver = {
+	.driver = {
+		   .name = FDP_I2C_DRIVER_NAME,
+		   .acpi_match_table = ACPI_PTR(fdp_nci_i2c_acpi_match),
+		  },
+	.id_table = fdp_nci_i2c_id_table,
+	.probe = fdp_nci_i2c_probe,
+	.remove = fdp_nci_i2c_remove,
+};
+module_i2c_driver(fdp_nci_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for Intel Fields Peak NFC controller");
+MODULE_AUTHOR("Robert Dolca <robert.dolca@intel.com>");
diff --git a/drivers/nfc/microread/Kconfig b/drivers/nfc/microread/Kconfig
index 951d554..2c6dbc9 100644
--- a/drivers/nfc/microread/Kconfig
+++ b/drivers/nfc/microread/Kconfig
@@ -1,20 +1,15 @@
 config NFC_MICROREAD
-	tristate "Inside Secure microread NFC driver"
-	depends on NFC_HCI
+	tristate
 	select CRC_CCITT
-	default n
 	---help---
 	  This module contains the main code for Inside Secure microread
 	  NFC chipsets. It implements the chipset HCI logic and hooks into
 	  the NFC kernel APIs. Physical layers will register against it.
 
-	  To compile this driver as a module, choose m here. The module will
-	  be called microread.
-	  Say N if unsure.
-
 config NFC_MICROREAD_I2C
-	tristate "NFC Microread i2c support"
-	depends on NFC_MICROREAD && I2C && NFC_SHDLC
+	tristate "Inside Secure Microread device support (I2C)"
+	depends on NFC_HCI && I2C && NFC_SHDLC
+	select NFC_MICROREAD
 	---help---
 	  This module adds support for the i2c interface of adapters using
 	  Inside microread chipsets.  Select this if your platform is using
@@ -24,8 +19,9 @@
 	  Say N if unsure.
 
 config NFC_MICROREAD_MEI
-	tristate "NFC Microread MEI support"
-	depends on NFC_MICROREAD && NFC_MEI_PHY
+	tristate "Inside Secure Microread device support (MEI)"
+	depends on NFC_HCI && NFC_MEI_PHY
+	select NFC_MICROREAD
 	---help---
 	  This module adds support for the mei interface of adapters using
 	  Inside microread chipsets.  Select this if your microread chipset
diff --git a/drivers/nfc/nfcmrvl/Kconfig b/drivers/nfc/nfcmrvl/Kconfig
index 796be24..444ca946 100644
--- a/drivers/nfc/nfcmrvl/Kconfig
+++ b/drivers/nfc/nfcmrvl/Kconfig
@@ -1,18 +1,15 @@
 config NFC_MRVL
-	tristate "Marvell NFC driver support"
-	depends on NFC_NCI
+	tristate
 	help
 	  The core driver to support Marvell NFC devices.
 
 	  This driver is required if you want to support
 	  Marvell NFC device 8897.
 
-	  Say Y here to compile Marvell NFC driver into the kernel or
-	  say M to compile it as module.
-
 config NFC_MRVL_USB
 	tristate "Marvell NFC-over-USB driver"
-	depends on NFC_MRVL && USB
+	depends on NFC_NCI && USB
+	select NFC_MRVL
 	help
 	  Marvell NFC-over-USB driver.
 
@@ -24,7 +21,8 @@
 
 config NFC_MRVL_UART
 	tristate "Marvell NFC-over-UART driver"
-	depends on NFC_MRVL && NFC_NCI_UART
+	depends on NFC_NCI && NFC_NCI_UART
+	select NFC_MRVL
 	help
 	  Marvell NFC-over-UART driver.
 
@@ -32,3 +30,25 @@
 
 	  Say Y here to compile support for Marvell NFC-over-UART driver
 	  into the kernel or say M to compile it as module.
+
+config NFC_MRVL_I2C
+	tristate "Marvell NFC-over-I2C driver"
+	depends on NFC_MRVL && I2C
+	help
+	  Marvell NFC-over-I2C driver.
+
+	  This driver provides support for Marvell NFC-over-I2C devices.
+
+	  Say Y here to compile support for Marvell NFC-over-I2C driver
+	  into the kernel or say M to compile it as module.
+
+config NFC_MRVL_SPI
+	tristate "Marvell NFC-over-SPI driver"
+	depends on NFC_MRVL && SPI
+	help
+	  Marvell NFC-over-SPI driver.
+
+	  This driver provides support for Marvell NFC-over-SPI devices.
+
+	  Say Y here to compile support for Marvell NFC-over-SPI driver
+	  into the kernel or say M to compile it as module.
diff --git a/drivers/nfc/nfcmrvl/Makefile b/drivers/nfc/nfcmrvl/Makefile
index 7751962..fa07c78 100644
--- a/drivers/nfc/nfcmrvl/Makefile
+++ b/drivers/nfc/nfcmrvl/Makefile
@@ -2,7 +2,7 @@
 # Makefile for NFCMRVL NCI based NFC driver
 #
 
-nfcmrvl-y += main.o
+nfcmrvl-y += main.o fw_dnld.o
 obj-$(CONFIG_NFC_MRVL) += nfcmrvl.o
 
 nfcmrvl_usb-y += usb.o
@@ -10,3 +10,9 @@
 
 nfcmrvl_uart-y += uart.o
 obj-$(CONFIG_NFC_MRVL_UART) += nfcmrvl_uart.o
+
+nfcmrvl_i2c-y += i2c.o
+obj-$(CONFIG_NFC_MRVL_I2C) += nfcmrvl_i2c.o
+
+nfcmrvl_spi-y += spi.o
+obj-$(CONFIG_NFC_MRVL_SPI) += nfcmrvl_spi.o
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
new file mode 100644
index 0000000..bfa7713
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -0,0 +1,553 @@
+/*
+ * Marvell NFC driver: Firmware downloader
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <linux/firmware.h>
+#include <linux/nfc.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+#define FW_DNLD_TIMEOUT			15000
+
+#define NCI_OP_PROPRIETARY_BOOT_CMD	nci_opcode_pack(NCI_GID_PROPRIETARY, \
+							NCI_OP_PROP_BOOT_CMD)
+
+/* FW download states */
+
+enum {
+	STATE_RESET = 0,
+	STATE_INIT,
+	STATE_SET_REF_CLOCK,
+	STATE_SET_HI_CONFIG,
+	STATE_OPEN_LC,
+	STATE_FW_DNLD,
+	STATE_CLOSE_LC,
+	STATE_BOOT
+};
+
+enum {
+	SUBSTATE_WAIT_COMMAND = 0,
+	SUBSTATE_WAIT_ACK_CREDIT,
+	SUBSTATE_WAIT_NACK_CREDIT,
+	SUBSTATE_WAIT_DATA_CREDIT,
+};
+
+/* Patterns for responses */
+
+static const uint8_t nci_pattern_core_reset_ntf[] = {
+	0x60, 0x00, 0x02, 0xA0, 0x01
+};
+
+static const uint8_t nci_pattern_core_init_rsp[] = {
+	0x40, 0x01, 0x11
+};
+
+static const uint8_t nci_pattern_core_set_config_rsp[] = {
+	0x40, 0x02, 0x02, 0x00, 0x00
+};
+
+static const uint8_t nci_pattern_core_conn_create_rsp[] = {
+	0x40, 0x04, 0x04, 0x00
+};
+
+static const uint8_t nci_pattern_core_conn_close_rsp[] = {
+	0x40, 0x05, 0x01, 0x00
+};
+
+static const uint8_t nci_pattern_core_conn_credits_ntf[] = {
+	0x60, 0x06, 0x03, 0x01, NCI_CORE_LC_CONNID_PROP_FW_DL, 0x01
+};
+
+static const uint8_t nci_pattern_proprietary_boot_rsp[] = {
+	0x4F, 0x3A, 0x01, 0x00
+};
+
+static struct sk_buff *alloc_lc_skb(struct nfcmrvl_private *priv, uint8_t plen)
+{
+	struct sk_buff *skb;
+	struct nci_data_hdr *hdr;
+
+	skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL);
+	if (!skb) {
+		pr_err("no memory for data\n");
+		return NULL;
+	}
+
+	hdr = (struct nci_data_hdr *) skb_put(skb, NCI_DATA_HDR_SIZE);
+	hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
+	hdr->rfu = 0;
+	hdr->plen = plen;
+
+	nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
+	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
+
+	return skb;
+}
+
+static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error)
+{
+	if (priv->fw_dnld.fw) {
+		release_firmware(priv->fw_dnld.fw);
+		priv->fw_dnld.fw = NULL;
+		priv->fw_dnld.header = NULL;
+		priv->fw_dnld.binary_config = NULL;
+	}
+
+	atomic_set(&priv->ndev->cmd_cnt, 0);
+	del_timer_sync(&priv->ndev->cmd_timer);
+
+	del_timer_sync(&priv->fw_dnld.timer);
+
+	nfc_info(priv->dev, "FW loading over (%d)\n", error);
+
+	if (error != 0) {
+		/* failed, halt the chip to avoid power consumption */
+		nfcmrvl_chip_halt(priv);
+	}
+
+	nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
+}
+
+static void fw_dnld_timeout(unsigned long arg)
+{
+	struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+
+	nfc_err(priv->dev, "FW loading timeout");
+	priv->fw_dnld.state = STATE_RESET;
+	fw_dnld_over(priv, -ETIMEDOUT);
+}
+
+static int process_state_reset(struct nfcmrvl_private *priv,
+			       struct sk_buff *skb)
+{
+	if (sizeof(nci_pattern_core_reset_ntf) != skb->len ||
+	    memcmp(skb->data, nci_pattern_core_reset_ntf,
+		   sizeof(nci_pattern_core_reset_ntf)))
+		return -EINVAL;
+
+	nfc_info(priv->dev, "BootROM reset, start fw download\n");
+
+	/* Start FW download state machine */
+	priv->fw_dnld.state = STATE_INIT;
+	nci_send_cmd(priv->ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+
+	return 0;
+}
+
+static int process_state_init(struct nfcmrvl_private *priv, struct sk_buff *skb)
+{
+	struct nci_core_set_config_cmd cmd;
+
+	if (sizeof(nci_pattern_core_init_rsp) >= skb->len ||
+	    memcmp(skb->data, nci_pattern_core_init_rsp,
+		   sizeof(nci_pattern_core_init_rsp)))
+		return -EINVAL;
+
+	cmd.num_params = 1;
+	cmd.param.id = NFCMRVL_PROP_REF_CLOCK;
+	cmd.param.len = 4;
+	memcpy(cmd.param.val, &priv->fw_dnld.header->ref_clock, 4);
+
+	nci_send_cmd(priv->ndev, NCI_OP_CORE_SET_CONFIG_CMD, 3 + cmd.param.len,
+		     &cmd);
+
+	priv->fw_dnld.state = STATE_SET_REF_CLOCK;
+	return 0;
+}
+
+static void create_lc(struct nfcmrvl_private *priv)
+{
+	uint8_t param[2] = { NCI_CORE_LC_PROP_FW_DL, 0x0 };
+
+	priv->fw_dnld.state = STATE_OPEN_LC;
+	nci_send_cmd(priv->ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, param);
+}
+
+static int process_state_set_ref_clock(struct nfcmrvl_private *priv,
+				       struct sk_buff *skb)
+{
+	struct nci_core_set_config_cmd cmd;
+
+	if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
+	    memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
+		return -EINVAL;
+
+	cmd.num_params = 1;
+	cmd.param.id = NFCMRVL_PROP_SET_HI_CONFIG;
+
+	switch (priv->phy) {
+	case NFCMRVL_PHY_UART:
+		cmd.param.len = 5;
+		memcpy(cmd.param.val,
+		       &priv->fw_dnld.binary_config->uart.baudrate,
+		       4);
+		cmd.param.val[4] =
+			priv->fw_dnld.binary_config->uart.flow_control;
+		break;
+	case NFCMRVL_PHY_I2C:
+		cmd.param.len = 5;
+		memcpy(cmd.param.val,
+		       &priv->fw_dnld.binary_config->i2c.clk,
+		       4);
+		cmd.param.val[4] = 0;
+		break;
+	case NFCMRVL_PHY_SPI:
+		cmd.param.len = 5;
+		memcpy(cmd.param.val,
+		       &priv->fw_dnld.binary_config->spi.clk,
+		       4);
+		cmd.param.val[4] = 0;
+		break;
+	default:
+		create_lc(priv);
+		return 0;
+	}
+
+	priv->fw_dnld.state = STATE_SET_HI_CONFIG;
+	nci_send_cmd(priv->ndev, NCI_OP_CORE_SET_CONFIG_CMD, 3 + cmd.param.len,
+		     &cmd);
+	return 0;
+}
+
+static int process_state_set_hi_config(struct nfcmrvl_private *priv,
+				       struct sk_buff *skb)
+{
+	if (sizeof(nci_pattern_core_set_config_rsp) != skb->len ||
+	    memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len))
+		return -EINVAL;
+
+	create_lc(priv);
+	return 0;
+}
+
+static int process_state_open_lc(struct nfcmrvl_private *priv,
+				 struct sk_buff *skb)
+{
+	if (sizeof(nci_pattern_core_conn_create_rsp) >= skb->len ||
+	    memcmp(skb->data, nci_pattern_core_conn_create_rsp,
+		   sizeof(nci_pattern_core_conn_create_rsp)))
+		return -EINVAL;
+
+	priv->fw_dnld.state = STATE_FW_DNLD;
+	priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND;
+	priv->fw_dnld.offset = priv->fw_dnld.binary_config->offset;
+	return 0;
+}
+
+static int process_state_fw_dnld(struct nfcmrvl_private *priv,
+				 struct sk_buff *skb)
+{
+	uint16_t len;
+	uint16_t comp_len;
+	struct sk_buff *out_skb;
+
+	switch (priv->fw_dnld.substate) {
+	case SUBSTATE_WAIT_COMMAND:
+		/*
+		 * Command format:
+		 * B0..2: NCI header
+		 * B3   : Helper command (0xA5)
+		 * B4..5: le16 data size
+		 * B6..7: le16 data size complement (~)
+		 * B8..N: payload
+		 */
+
+		/* Remove NCI HDR */
+		skb_pull(skb, 3);
+		if (skb->data[0] != HELPER_CMD_PACKET_FORMAT || skb->len != 5) {
+			nfc_err(priv->dev, "bad command");
+			return -EINVAL;
+		}
+		skb_pull(skb, 1);
+		memcpy(&len, skb->data, 2);
+		skb_pull(skb, 2);
+		memcpy(&comp_len, skb->data, 2);
+		skb_pull(skb, 2);
+		len = get_unaligned_le16(&len);
+		comp_len = get_unaligned_le16(&comp_len);
+		if (((~len) & 0xFFFF) != comp_len) {
+			nfc_err(priv->dev, "bad len complement: %x %x %x",
+				len, comp_len, (~len & 0xFFFF));
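+			/*
+			 * Corrupted command: reply with a NACK byte so that
+			 * the helper resends it, then wait for the credit.
+			 */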
+			out_skb = alloc_lc_skb(priv, 1);
+			if (!out_skb)
+				return -ENOMEM;
+			*skb_put(out_skb, 1) = 0xBF;
+			nci_send_frame(priv->ndev, out_skb);
+			priv->fw_dnld.substate = SUBSTATE_WAIT_NACK_CREDIT;
+			return 0;
+		}
+		priv->fw_dnld.chunk_len = len;
+		out_skb = alloc_lc_skb(priv, 1);
+		if (!out_skb)
+			return -ENOMEM;
+		*skb_put(out_skb, 1) = HELPER_ACK_PACKET_FORMAT;
+		nci_send_frame(priv->ndev, out_skb);
+		priv->fw_dnld.substate = SUBSTATE_WAIT_ACK_CREDIT;
+		break;
+
+	case SUBSTATE_WAIT_ACK_CREDIT:
+		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
+		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
+			   skb->len)) {
+			nfc_err(priv->dev, "bad packet: waiting for credit");
+			return -EINVAL;
+		}
+		if (priv->fw_dnld.chunk_len == 0) {
+			/* FW Loading is done */
+			uint8_t conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
+
+			priv->fw_dnld.state = STATE_CLOSE_LC;
+			nci_send_cmd(priv->ndev, NCI_OP_CORE_CONN_CLOSE_CMD,
+				     1, &conn_id);
+		} else {
+			out_skb = alloc_lc_skb(priv, priv->fw_dnld.chunk_len);
+			if (!out_skb)
+				return -ENOMEM;
+			memcpy(skb_put(out_skb, priv->fw_dnld.chunk_len),
+			       ((uint8_t *)priv->fw_dnld.fw->data) +
+			       priv->fw_dnld.offset,
+			       priv->fw_dnld.chunk_len);
+			nci_send_frame(priv->ndev, out_skb);
+			priv->fw_dnld.substate = SUBSTATE_WAIT_DATA_CREDIT;
+		}
+		break;
+
+	case SUBSTATE_WAIT_DATA_CREDIT:
+		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
+		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
+			    skb->len)) {
+			nfc_err(priv->dev, "bad packet: waiting for credit");
+			return -EINVAL;
+		}
+		priv->fw_dnld.offset += priv->fw_dnld.chunk_len;
+		priv->fw_dnld.chunk_len = 0;
+		priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND;
+		break;
+
+	case SUBSTATE_WAIT_NACK_CREDIT:
+		if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len ||
+		    memcmp(nci_pattern_core_conn_credits_ntf, skb->data,
+			    skb->len)) {
+			nfc_err(priv->dev, "bad packet: waiting for credit");
+			return -EINVAL;
+		}
+		priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND;
+		break;
+	}
+	return 0;
+}
+
+static int process_state_close_lc(struct nfcmrvl_private *priv,
+				  struct sk_buff *skb)
+{
+	if (sizeof(nci_pattern_core_conn_close_rsp) != skb->len ||
+	    memcmp(skb->data, nci_pattern_core_conn_close_rsp, skb->len))
+		return -EINVAL;
+
+	priv->fw_dnld.state = STATE_BOOT;
+	nci_send_cmd(priv->ndev, NCI_OP_PROPRIETARY_BOOT_CMD, 0, NULL);
+	return 0;
+}
+
+static int process_state_boot(struct nfcmrvl_private *priv, struct sk_buff *skb)
+{
+	if (sizeof(nci_pattern_proprietary_boot_rsp) != skb->len ||
+	    memcmp(skb->data, nci_pattern_proprietary_boot_rsp, skb->len))
+		return -EINVAL;
+
+	/*
+	 * Update HI config to use the right configuration for the next
+	 * data exchanges.
+	 */
+	priv->if_ops->nci_update_config(priv,
+					&priv->fw_dnld.binary_config->config);
+
+	if (priv->fw_dnld.binary_config == &priv->fw_dnld.header->helper) {
+		/*
+		 * A helper was needed and has been uploaded. Now wait for the
+		 * next RESET NTF to start the actual FW download.
+		 */
+		priv->fw_dnld.state = STATE_RESET;
+		priv->fw_dnld.binary_config = &priv->fw_dnld.header->firmware;
+		nfc_info(priv->dev, "FW loading: helper loaded");
+	} else {
+		nfc_info(priv->dev, "FW loading: firmware loaded");
+		fw_dnld_over(priv, 0);
+	}
+	return 0;
+}
+
+static void fw_dnld_rx_work(struct work_struct *work)
+{
+	int ret;
+	struct sk_buff *skb;
+	struct nfcmrvl_fw_dnld *fw_dnld = container_of(work,
+						       struct nfcmrvl_fw_dnld,
+						       rx_work);
+	struct nfcmrvl_private *priv = container_of(fw_dnld,
+						    struct nfcmrvl_private,
+						    fw_dnld);
+
+	while ((skb = skb_dequeue(&fw_dnld->rx_q))) {
+		nfc_send_to_raw_sock(priv->ndev->nfc_dev, skb,
+				     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
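+		/*
+		 * Walk the download state machine: RESET -> INIT ->
+		 * SET_REF_CLOCK -> SET_HI_CONFIG -> OPEN_LC -> FW_DNLD ->
+		 * CLOSE_LC -> BOOT, run once for the optional helper and
+		 * once for the firmware itself.
+		 */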
+		switch (fw_dnld->state) {
+		case STATE_RESET:
+			ret = process_state_reset(priv, skb);
+			break;
+		case STATE_INIT:
+			ret = process_state_init(priv, skb);
+			break;
+		case STATE_SET_REF_CLOCK:
+			ret = process_state_set_ref_clock(priv, skb);
+			break;
+		case STATE_SET_HI_CONFIG:
+			ret = process_state_set_hi_config(priv, skb);
+			break;
+		case STATE_OPEN_LC:
+			ret = process_state_open_lc(priv, skb);
+			break;
+		case STATE_FW_DNLD:
+			ret = process_state_fw_dnld(priv, skb);
+			break;
+		case STATE_CLOSE_LC:
+			ret = process_state_close_lc(priv, skb);
+			break;
+		case STATE_BOOT:
+			ret = process_state_boot(priv, skb);
+			break;
+		default:
+			ret = -EFAULT;
+		}
+
+		kfree_skb(skb);
+
+		if (ret != 0) {
+			nfc_err(priv->dev, "FW loading error");
+			fw_dnld_over(priv, ret);
+			break;
+		}
+	}
+}
+
+int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
+{
+	char name[32];
+
+	INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work);
+	snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq",
+		 dev_name(priv->dev));
+	priv->fw_dnld.rx_wq = create_singlethread_workqueue(name);
+	if (!priv->fw_dnld.rx_wq)
+		return -ENOMEM;
+	skb_queue_head_init(&priv->fw_dnld.rx_q);
+	return 0;
+}
+
+void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv)
+{
+	destroy_workqueue(priv->fw_dnld.rx_wq);
+}
+
+void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
+				struct sk_buff *skb)
+{
+	/* Allow next command */
+	atomic_set(&priv->ndev->cmd_cnt, 1);
+	del_timer_sync(&priv->ndev->cmd_timer);
+
+	/* Queue and trigger rx work */
+	skb_queue_tail(&priv->fw_dnld.rx_q, skb);
+	queue_work(priv->fw_dnld.rx_wq, &priv->fw_dnld.rx_work);
+}
+
+void nfcmrvl_fw_dnld_abort(struct nfcmrvl_private *priv)
+{
+	fw_dnld_over(priv, -EHOSTDOWN);
+}
+
+int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name)
+{
+	struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
+	struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld;
+
+	if (!priv->support_fw_dnld)
+		return -ENOTSUPP;
+
+	if (!firmware_name || !firmware_name[0])
+		return -EINVAL;
+
+	strlcpy(fw_dnld->name, firmware_name, sizeof(fw_dnld->name));
+
+	/*
+	 * Retrieve the FW binary file and parse it to initialize the FW
+	 * download state machine.
+	 */
+	if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) {
+		nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name);
+		return -ENOENT;
+	}
+
+	fw_dnld->header = (const struct nfcmrvl_fw *) priv->fw_dnld.fw->data;
+
+	if (fw_dnld->header->magic != NFCMRVL_FW_MAGIC ||
+	    fw_dnld->header->phy != priv->phy) {
+		nfc_err(priv->dev, "bad firmware binary %s magic=0x%x phy=%d",
+			firmware_name, fw_dnld->header->magic,
+			fw_dnld->header->phy);
+		release_firmware(fw_dnld->fw);
+		fw_dnld->fw = NULL;
+		fw_dnld->header = NULL;
+		return -EINVAL;
+	}
+
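+	/* A non-zero helper offset means a helper image must be sent first */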
+	if (fw_dnld->header->helper.offset != 0) {
+		nfc_info(priv->dev, "loading helper");
+		fw_dnld->binary_config = &fw_dnld->header->helper;
+	} else {
+		nfc_info(priv->dev, "loading firmware");
+		fw_dnld->binary_config = &fw_dnld->header->firmware;
+	}
+
+	/* Configure a timer for timeout */
+	setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
+		    (unsigned long) priv);
+	mod_timer(&priv->fw_dnld.timer,
+		  jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
+
+	/* Reconfigure HI to be sure it uses the bootrom values */
+	priv->if_ops->nci_update_config(priv,
+					&fw_dnld->header->bootrom.config);
+
+	/* Allow first command */
+	atomic_set(&priv->ndev->cmd_cnt, 1);
+
+	/* First, reset the chip */
+	priv->fw_dnld.state = STATE_RESET;
+	nfcmrvl_chip_reset(priv);
+
+	/* Now wait for CORE_RESET_NTF or timeout */
+
+	return 0;
+}
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.h b/drivers/nfc/nfcmrvl/fw_dnld.h
new file mode 100644
index 0000000..ee4a339
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/fw_dnld.h
@@ -0,0 +1,98 @@
+/**
+ * Marvell NFC driver: Firmware downloader
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+#ifndef __NFCMRVL_FW_DNLD_H__
+#define __NFCMRVL_FW_DNLD_H__
+
+#include <linux/workqueue.h>
+
+#define NFCMRVL_FW_MAGIC		0x88888888
+
+#define NCI_OP_PROPRIETARY_BOOT_CMD	0x3A
+
+#define NCI_CORE_LC_PROP_FW_DL		0xFD
+#define NCI_CORE_LC_CONNID_PROP_FW_DL	0x02
+
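+/* Byte markers used by the helper download protocol */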
+#define HELPER_CMD_ENTRY_POINT		0x04
+#define HELPER_CMD_PACKET_FORMAT	0xA5
+#define HELPER_ACK_PACKET_FORMAT	0x5A
+#define HELPER_RETRY_REQUESTED		(1 << 15)
+
+struct nfcmrvl_private;
+
+struct nfcmrvl_fw_uart_config {
+	uint8_t flow_control;
+	uint32_t baudrate;
+} __packed;
+
+struct nfcmrvl_fw_i2c_config {
+	uint32_t clk;
+} __packed;
+
+struct nfcmrvl_fw_spi_config {
+	uint32_t clk;
+} __packed;
+
+struct nfcmrvl_fw_binary_config {
+	uint32_t offset;
+	union {
+		void *config;
+		struct nfcmrvl_fw_uart_config uart;
+		struct nfcmrvl_fw_i2c_config i2c;
+		struct nfcmrvl_fw_spi_config spi;
+		uint8_t reserved[64];
+	};
+} __packed;
+
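+/*
+ * Layout of the firmware image header: a magic number, the reference clock,
+ * the target PHY, and one binary configuration per download stage (bootrom,
+ * optional helper, firmware).
+ */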
+struct nfcmrvl_fw {
+	uint32_t magic;
+	uint32_t ref_clock;
+	uint32_t phy;
+	struct nfcmrvl_fw_binary_config bootrom;
+	struct nfcmrvl_fw_binary_config helper;
+	struct nfcmrvl_fw_binary_config firmware;
+	uint8_t reserved[64];
+} __packed;
+
+struct nfcmrvl_fw_dnld {
+	char name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+	const struct firmware *fw;
+
+	const struct nfcmrvl_fw *header;
+	const struct nfcmrvl_fw_binary_config *binary_config;
+
+	int state;
+	int substate;
+	int offset;
+	int chunk_len;
+
+	struct workqueue_struct	*rx_wq;
+	struct work_struct rx_work;
+	struct sk_buff_head rx_q;
+
+	struct timer_list timer;
+};
+
+int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv);
+void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv);
+void nfcmrvl_fw_dnld_abort(struct nfcmrvl_private *priv);
+int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name);
+void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
+				struct sk_buff *skb);
+
+#endif
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
new file mode 100644
index 0000000..78b7aa8
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -0,0 +1,290 @@
+/**
+ * Marvell NFC-over-I2C driver: I2C interface related functions
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/nfc.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include "nfcmrvl.h"
+
+struct nfcmrvl_i2c_drv_data {
+	unsigned long flags;
+	struct device *dev;
+	struct i2c_client *i2c;
+	struct nfcmrvl_private *priv;
+};
+
+static int nfcmrvl_i2c_read(struct nfcmrvl_i2c_drv_data *drv_data,
+			    struct sk_buff **skb)
+{
+	int ret;
+	struct nci_ctrl_hdr nci_hdr;
+
+	/* Read NCI header to know the payload size */
+	ret = i2c_master_recv(drv_data->i2c, (u8 *)&nci_hdr, NCI_CTRL_HDR_SIZE);
+	if (ret != NCI_CTRL_HDR_SIZE) {
+		nfc_err(&drv_data->i2c->dev, "cannot read NCI header\n");
+		return -EBADMSG;
+	}
+
+	if (nci_hdr.plen > NCI_MAX_PAYLOAD_SIZE) {
+		nfc_err(&drv_data->i2c->dev, "invalid packet payload size\n");
+		return -EBADMSG;
+	}
+
+	*skb = nci_skb_alloc(drv_data->priv->ndev,
+			     nci_hdr.plen + NCI_CTRL_HDR_SIZE, GFP_KERNEL);
+	if (!*skb)
+		return -ENOMEM;
+
+	/* Copy NCI header into the SKB */
+	memcpy(skb_put(*skb, NCI_CTRL_HDR_SIZE), &nci_hdr, NCI_CTRL_HDR_SIZE);
+
+	if (nci_hdr.plen) {
+		/* Read the NCI payload */
+		ret = i2c_master_recv(drv_data->i2c,
+				      skb_put(*skb, nci_hdr.plen),
+				      nci_hdr.plen);
+
+		if (ret != nci_hdr.plen) {
+			nfc_err(&drv_data->i2c->dev,
+				"Invalid frame payload length: %u (expected %u)\n",
+				ret, nci_hdr.plen);
+			kfree_skb(*skb);
+			return -EBADMSG;
+		}
+	}
+
+	return 0;
+}
+
+static irqreturn_t nfcmrvl_i2c_int_irq_thread_fn(int irq, void *drv_data_ptr)
+{
+	struct nfcmrvl_i2c_drv_data *drv_data = drv_data_ptr;
+	struct sk_buff *skb = NULL;
+	int ret;
+
+	if (!drv_data->priv)
+		return IRQ_HANDLED;
+
+	if (test_bit(NFCMRVL_PHY_ERROR, &drv_data->priv->flags))
+		return IRQ_HANDLED;
+
+	ret = nfcmrvl_i2c_read(drv_data, &skb);
+
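+	/*
+	 * A bus error (-EREMOTEIO) poisons the PHY, read failures are
+	 * logged and dropped, anything else is a complete NCI frame.
+	 */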
+	switch (ret) {
+	case -EREMOTEIO:
+		set_bit(NFCMRVL_PHY_ERROR, &drv_data->priv->flags);
+		break;
+	case -ENOMEM:
+	case -EBADMSG:
+		nfc_err(&drv_data->i2c->dev, "read failed %d\n", ret);
+		break;
+	default:
+		if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
+			nfc_err(&drv_data->i2c->dev, "corrupted RX packet\n");
+		break;
+	}
+	return IRQ_HANDLED;
+}
+
+static int nfcmrvl_i2c_nci_open(struct nfcmrvl_private *priv)
+{
+	struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data;
+
+	if (!drv_data)
+		return -ENODEV;
+
+	return 0;
+}
+
+static int nfcmrvl_i2c_nci_close(struct nfcmrvl_private *priv)
+{
+	return 0;
+}
+
+static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv,
+				struct sk_buff *skb)
+{
+	struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data;
+	int ret;
+
+	if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags))
+		return -EREMOTEIO;
+
+	ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
+
+	/* Retry if chip was in standby */
+	if (ret == -EREMOTEIO) {
+		nfc_info(drv_data->dev, "chip may sleep, retry\n");
+		usleep_range(6000, 10000);
+		ret = i2c_master_send(drv_data->i2c, skb->data, skb->len);
+	}
+
+	if (ret >= 0) {
+		if (ret != skb->len) {
+			nfc_err(drv_data->dev,
+				"Invalid length sent: %u (expected %u)\n",
+				ret, skb->len);
+			ret = -EREMOTEIO;
+		} else {
+			ret = 0;
+		}
+		kfree_skb(skb);
+	}
+
+	return ret;
+}
+
+static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
+					  const void *param)
+{
+}
+
+static struct nfcmrvl_if_ops i2c_ops = {
+	.nci_open = nfcmrvl_i2c_nci_open,
+	.nci_close = nfcmrvl_i2c_nci_close,
+	.nci_send = nfcmrvl_i2c_nci_send,
+	.nci_update_config = nfcmrvl_i2c_nci_update_config,
+};
+
+static int nfcmrvl_i2c_parse_dt(struct device_node *node,
+				struct nfcmrvl_platform_data *pdata)
+{
+	int ret;
+
+	ret = nfcmrvl_parse_dt(node, pdata);
+	if (ret < 0) {
+		pr_err("Failed to get generic entries\n");
+		return ret;
+	}
+
+	if (of_find_property(node, "i2c-int-falling", NULL))
+		pdata->irq_polarity = IRQF_TRIGGER_FALLING;
+	else
+		pdata->irq_polarity = IRQF_TRIGGER_RISING;
+
+	ret = irq_of_parse_and_map(node, 0);
+	if (ret < 0) {
+		pr_err("Unable to get irq, error: %d\n", ret);
+		return ret;
+	}
+	pdata->irq = ret;
+
+	return 0;
+}
+
+static int nfcmrvl_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct nfcmrvl_i2c_drv_data *drv_data;
+	struct nfcmrvl_platform_data *pdata;
+	struct nfcmrvl_platform_data config;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+		return -ENODEV;
+	}
+
+	drv_data = devm_kzalloc(&client->dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	drv_data->i2c = client;
+	drv_data->dev = &client->dev;
+	drv_data->priv = NULL;
+
+	i2c_set_clientdata(client, drv_data);
+
+	pdata = client->dev.platform_data;
+
+	if (!pdata && client->dev.of_node)
+		if (nfcmrvl_i2c_parse_dt(client->dev.of_node, &config) == 0)
+			pdata = &config;
+
+	if (!pdata)
+		return -EINVAL;
+
+	/* Request the read IRQ */
+	ret = devm_request_threaded_irq(&drv_data->i2c->dev, pdata->irq,
+					NULL, nfcmrvl_i2c_int_irq_thread_fn,
+					pdata->irq_polarity | IRQF_ONESHOT,
+					"nfcmrvl_i2c_int", drv_data);
+	if (ret < 0) {
+		nfc_err(&drv_data->i2c->dev,
+			"Unable to register IRQ handler\n");
+		return ret;
+	}
+
+	drv_data->priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_I2C,
+						  drv_data, &i2c_ops,
+						  &drv_data->i2c->dev, pdata);
+
+	if (IS_ERR(drv_data->priv))
+		return PTR_ERR(drv_data->priv);
+
+	drv_data->priv->support_fw_dnld = true;
+
+	return 0;
+}
+
+static int nfcmrvl_i2c_remove(struct i2c_client *client)
+{
+	struct nfcmrvl_i2c_drv_data *drv_data = i2c_get_clientdata(client);
+
+	nfcmrvl_nci_unregister_dev(drv_data->priv);
+
+	return 0;
+}
+
+static const struct of_device_id of_nfcmrvl_i2c_match[] = {
+	{ .compatible = "marvell,nfc-i2c", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_nfcmrvl_i2c_match);
+
+static struct i2c_device_id nfcmrvl_i2c_id_table[] = {
+	{ "nfcmrvl_i2c", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table);
+
+static struct i2c_driver nfcmrvl_i2c_driver = {
+	.probe = nfcmrvl_i2c_probe,
+	.id_table = nfcmrvl_i2c_id_table,
+	.remove = nfcmrvl_i2c_remove,
+	.driver = {
+		.name		= "nfcmrvl_i2c",
+		.owner		= THIS_MODULE,
+		.of_match_table	= of_match_ptr(of_nfcmrvl_i2c_match),
+	},
+};
+
+module_i2c_driver(nfcmrvl_i2c_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 4a8866d..8079ae0 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -1,7 +1,7 @@
 /*
  * Marvell NFC driver: major functions
  *
- * Copyright (C) 2014, Marvell International Ltd.
+ * Copyright (C) 2014-2015 Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -25,8 +25,6 @@
 #include <net/nfc/nci_core.h>
 #include "nfcmrvl.h"
 
-#define VERSION "1.0"
-
 static int nfcmrvl_nci_open(struct nci_dev *ndev)
 {
 	struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
@@ -35,6 +33,9 @@
 	if (test_and_set_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
 		return 0;
 
+	/* Clear any fault left over from the previous session */
+	clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
+
 	err = priv->if_ops->nci_open(priv);
 
 	if (err)
@@ -63,9 +64,6 @@
 
 	skb->dev = (void *)ndev;
 
-	if (!test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
-		return -EBUSY;
-
 	if (priv->config.hci_muxed) {
 		unsigned char *hdr;
 		unsigned char len = skb->len;
@@ -88,21 +86,30 @@
 	return 0;
 }
 
+static int nfcmrvl_nci_fw_download(struct nci_dev *ndev,
+				   const char *firmware_name)
+{
+	return nfcmrvl_fw_dnld_start(ndev, firmware_name);
+}
+
 static struct nci_ops nfcmrvl_nci_ops = {
 	.open = nfcmrvl_nci_open,
 	.close = nfcmrvl_nci_close,
 	.send = nfcmrvl_nci_send,
 	.setup = nfcmrvl_nci_setup,
+	.fw_download = nfcmrvl_nci_fw_download,
 };
 
-struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
+				void *drv_data,
 				struct nfcmrvl_if_ops *ops,
 				struct device *dev,
 				struct nfcmrvl_platform_data *pdata)
 {
 	struct nfcmrvl_private *priv;
 	int rc;
-	int headroom = 0;
+	int headroom;
+	int tailroom;
 	u32 protocols;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -112,6 +119,7 @@
 	priv->drv_data = drv_data;
 	priv->if_ops = ops;
 	priv->dev = dev;
+	priv->phy = phy;
 
 	memcpy(&priv->config, pdata, sizeof(*pdata));
 
@@ -124,8 +132,14 @@
 			nfc_err(dev, "failed to request reset_n io\n");
 	}
 
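+	/*
+	 * NCI-over-SPI needs headroom for the SPI header and one byte of
+	 * tailroom for the dummy byte appended by nfcmrvl_spi_nci_send().
+	 */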
+	if (phy == NFCMRVL_PHY_SPI) {
+		headroom = NCI_SPI_HDR_LEN;
+		tailroom = 1;
+	} else {
+		headroom = tailroom = 0;
+	}
+
 	if (priv->config.hci_muxed)
-		headroom = NFCMRVL_HCI_EVENT_HEADER_SIZE;
+		headroom += NFCMRVL_HCI_EVENT_HEADER_SIZE;
 
 	protocols = NFC_PROTO_JEWEL_MASK
 		| NFC_PROTO_MIFARE_MASK
@@ -136,7 +150,7 @@
 		| NFC_PROTO_NFC_DEP_MASK;
 
 	priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols,
-					 headroom, 0);
+					 headroom, tailroom);
 	if (!priv->ndev) {
 		nfc_err(dev, "nci_allocate_device failed\n");
 		rc = -ENOMEM;
@@ -145,18 +159,26 @@
 
 	nci_set_drvdata(priv->ndev, priv);
 
-	nfcmrvl_chip_reset(priv);
-
 	rc = nci_register_device(priv->ndev);
 	if (rc) {
 		nfc_err(dev, "nci_register_device failed %d\n", rc);
-		nci_free_device(priv->ndev);
-		goto error;
+		goto error_free_dev;
+	}
+
+	/* Ensure that controller is powered off */
+	nfcmrvl_chip_halt(priv);
+
+	rc = nfcmrvl_fw_dnld_init(priv);
+	if (rc) {
+		nfc_err(dev, "failed to initialize FW download %d\n", rc);
+		goto error_free_dev;
 	}
 
 	nfc_info(dev, "registered with nci successfully\n");
 	return priv;
 
+error_free_dev:
+	nci_free_device(priv->ndev);
 error:
 	kfree(priv);
 	return ERR_PTR(rc);
@@ -167,6 +189,11 @@
 {
 	struct nci_dev *ndev = priv->ndev;
 
+	if (priv->ndev->nfc_dev->fw_download_in_progress)
+		nfcmrvl_fw_dnld_abort(priv);
+
+	nfcmrvl_fw_dnld_deinit(priv);
+
 	nci_unregister_device(ndev);
 	nci_free_device(ndev);
 	kfree(priv);
@@ -187,6 +214,11 @@
 		}
 	}
 
+	if (priv->ndev->nfc_dev->fw_download_in_progress) {
+		nfcmrvl_fw_dnld_recv_frame(priv, skb);
+		return 0;
+	}
+
 	if (test_bit(NFCMRVL_NCI_RUNNING, &priv->flags))
 		nci_recv_frame(priv->ndev, skb);
 	else {
@@ -201,10 +233,8 @@
 
 void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
 {
-	/*
-	 * This function does not take care if someone is using the device.
-	 * To be improved.
-	 */
+	/* Clear any fault left over from the previous session */
+	clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
 
 	if (priv->config.reset_n_io) {
 		nfc_info(priv->dev, "reset the chip\n");
@@ -215,6 +245,12 @@
 		nfc_info(priv->dev, "no reset available on this interface\n");
 }
 
+void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
+{
+	if (priv->config.reset_n_io)
+		gpio_set_value(priv->config.reset_n_io, 0);
+}
+
 #ifdef CONFIG_OF
 
 int nfcmrvl_parse_dt(struct device_node *node,
@@ -252,6 +288,5 @@
 EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt);
 
 MODULE_AUTHOR("Marvell International Ltd.");
-MODULE_DESCRIPTION("Marvell NFC driver ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Marvell NFC driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index e5a7e54..de68ff4 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -1,7 +1,7 @@
 /**
  * Marvell NFC driver
  *
- * Copyright (C) 2014, Marvell International Ltd.
+ * Copyright (C) 2014-2015, Marvell International Ltd.
  *
  * This software file (the "File") is distributed by Marvell International
  * Ltd. under the terms of the GNU General Public License Version 2, June 1991
@@ -21,8 +21,11 @@
 
 #include <linux/platform_data/nfcmrvl.h>
 
+#include "fw_dnld.h"
+
 /* Define private flags: */
 #define NFCMRVL_NCI_RUNNING			1
+#define NFCMRVL_PHY_ERROR			2
 
 #define NFCMRVL_EXT_COEX_ID			0xE0
 #define NFCMRVL_NOT_ALLOWED_ID			0xE1
@@ -37,6 +40,8 @@
 */
 
 #define NFCMRVL_PB_BAIL_OUT			0x11
+#define NFCMRVL_PROP_REF_CLOCK			0xF0
+#define NFCMRVL_PROP_SET_HI_CONFIG		0xF1
 
 /*
 ** HCI defines
@@ -52,9 +57,10 @@
 enum nfcmrvl_phy {
 	NFCMRVL_PHY_USB		= 0,
 	NFCMRVL_PHY_UART	= 1,
+	NFCMRVL_PHY_I2C		= 2,
+	NFCMRVL_PHY_SPI		= 3,
 };
 
-
 struct nfcmrvl_private {
 
 	unsigned long flags;
@@ -62,8 +68,15 @@
 	/* Platform configuration */
 	struct nfcmrvl_platform_data config;
 
+	/* NCI device */
 	struct nci_dev *ndev;
 
+	/* FW download context */
+	struct nfcmrvl_fw_dnld fw_dnld;
+
+	/* FW download support */
+	bool support_fw_dnld;
+
 	/*
 	** PHY related information
 	*/
@@ -82,17 +95,21 @@
 	int (*nci_open) (struct nfcmrvl_private *priv);
 	int (*nci_close) (struct nfcmrvl_private *priv);
 	int (*nci_send) (struct nfcmrvl_private *priv, struct sk_buff *skb);
+	void (*nci_update_config)(struct nfcmrvl_private *priv,
+				  const void *param);
 };
 
 void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv);
 int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb);
-struct nfcmrvl_private *nfcmrvl_nci_register_dev(void *drv_data,
+struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy,
+				void *drv_data,
 				struct nfcmrvl_if_ops *ops,
 				struct device *dev,
 				struct nfcmrvl_platform_data *pdata);
 
 
 void nfcmrvl_chip_reset(struct nfcmrvl_private *priv);
+void nfcmrvl_chip_halt(struct nfcmrvl_private *priv);
 
 int nfcmrvl_parse_dt(struct device_node *node,
 		     struct nfcmrvl_platform_data *pdata);
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
new file mode 100644
index 0000000..a7faa0b
--- /dev/null
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -0,0 +1,228 @@
+/**
+ * Marvell NFC-over-SPI driver: SPI interface related functions
+ *
+ * Copyright (C) 2015, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ **/
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/nfc.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include "nfcmrvl.h"
+
+#define SPI_WAIT_HANDSHAKE	1
+
+struct nfcmrvl_spi_drv_data {
+	unsigned long flags;
+	struct spi_device *spi;
+	struct nci_spi *nci_spi;
+	struct completion handshake_completion;
+	struct nfcmrvl_private *priv;
+};
+
+static irqreturn_t nfcmrvl_spi_int_irq_thread_fn(int irq, void *drv_data_ptr)
+{
+	struct nfcmrvl_spi_drv_data *drv_data = drv_data_ptr;
+	struct sk_buff *skb;
+
+	/*
+	 * Special case where we are waiting for SPI_INT deassertion to start a
+	 * transfer.
+	 */
+	if (test_and_clear_bit(SPI_WAIT_HANDSHAKE, &drv_data->flags)) {
+		complete(&drv_data->handshake_completion);
+		return IRQ_HANDLED;
+	}
+
+	/* Normal case, SPI_INT deasserted by slave to trigger a master read */
+
+	skb = nci_spi_read(drv_data->nci_spi);
+	if (!skb) {
+		nfc_err(&drv_data->spi->dev, "failed to read spi packet");
+		return IRQ_HANDLED;
+	}
+
+	if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0)
+		nfc_err(&drv_data->spi->dev, "corrupted RX packet");
+
+	return IRQ_HANDLED;
+}
+
+static int nfcmrvl_spi_nci_open(struct nfcmrvl_private *priv)
+{
+	return 0;
+}
+
+static int nfcmrvl_spi_nci_close(struct nfcmrvl_private *priv)
+{
+	return 0;
+}
+
+static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv,
+				struct sk_buff *skb)
+{
+	struct nfcmrvl_spi_drv_data *drv_data = priv->drv_data;
+	int err;
+
+	/* Reinit completion for slave handshake */
+	reinit_completion(&drv_data->handshake_completion);
+	set_bit(SPI_WAIT_HANDSHAKE, &drv_data->flags);
+
+	/*
+	 * Append a dummy byte at the end of SPI frame. This is due to a
+	 * specific DMA implementation in the controller
+	 */
+	skb_put(skb, 1);
+
+	/* Send the SPI packet */
+	err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion,
+			   skb);
+	if (err != 0) {
+		nfc_err(priv->dev, "spi_send failed %d", err);
+		kfree_skb(skb);
+	}
+	return err;
+}
+
+static void nfcmrvl_spi_nci_update_config(struct nfcmrvl_private *priv,
+					  const void *param)
+{
+	struct nfcmrvl_spi_drv_data *drv_data = priv->drv_data;
+	const struct nfcmrvl_fw_spi_config *config = param;
+
+	drv_data->nci_spi->xfer_speed_hz = config->clk;
+}
+
+static struct nfcmrvl_if_ops spi_ops = {
+	.nci_open = nfcmrvl_spi_nci_open,
+	.nci_close = nfcmrvl_spi_nci_close,
+	.nci_send = nfcmrvl_spi_nci_send,
+	.nci_update_config = nfcmrvl_spi_nci_update_config,
+};
+
+static int nfcmrvl_spi_parse_dt(struct device_node *node,
+				struct nfcmrvl_platform_data *pdata)
+{
+	int ret;
+
+	ret = nfcmrvl_parse_dt(node, pdata);
+	if (ret < 0) {
+		pr_err("Failed to get generic entries\n");
+		return ret;
+	}
+
+	ret = irq_of_parse_and_map(node, 0);
+	if (ret < 0) {
+		pr_err("Unable to get irq, error: %d\n", ret);
+		return ret;
+	}
+	pdata->irq = ret;
+
+	return 0;
+}
+
+static int nfcmrvl_spi_probe(struct spi_device *spi)
+{
+	struct nfcmrvl_platform_data *pdata;
+	struct nfcmrvl_platform_data config;
+	struct nfcmrvl_spi_drv_data *drv_data;
+	int ret = 0;
+
+	drv_data = devm_kzalloc(&spi->dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	drv_data->spi = spi;
+	drv_data->priv = NULL;
+	spi_set_drvdata(spi, drv_data);
+
+	pdata = spi->dev.platform_data;
+
+	if (!pdata && spi->dev.of_node)
+		if (nfcmrvl_spi_parse_dt(spi->dev.of_node, &config) == 0)
+			pdata = &config;
+
+	if (!pdata)
+		return -EINVAL;
+
+	ret = devm_request_threaded_irq(&drv_data->spi->dev, pdata->irq,
+					NULL, nfcmrvl_spi_int_irq_thread_fn,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					"nfcmrvl_spi_int", drv_data);
+	if (ret < 0) {
+		nfc_err(&drv_data->spi->dev, "Unable to register IRQ handler");
+		return -ENODEV;
+	}
+
+	drv_data->priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_SPI,
+						  drv_data, &spi_ops,
+						  &drv_data->spi->dev,
+						  pdata);
+	if (IS_ERR(drv_data->priv))
+		return PTR_ERR(drv_data->priv);
+
+	drv_data->priv->support_fw_dnld = true;
+
+	drv_data->nci_spi = nci_spi_allocate_spi(drv_data->spi, 0, 10,
+						 drv_data->priv->ndev);
+	if (!drv_data->nci_spi)
+		return -ENOMEM;
+
+	/* Init completion for slave handshake */
+	init_completion(&drv_data->handshake_completion);
+	return 0;
+}
+
+static int nfcmrvl_spi_remove(struct spi_device *spi)
+{
+	struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi);
+
+	nfcmrvl_nci_unregister_dev(drv_data->priv);
+	return 0;
+}
+
+static const struct of_device_id of_nfcmrvl_spi_match[] = {
+	{ .compatible = "marvell,nfc-spi", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_nfcmrvl_spi_match);
+
+static const struct spi_device_id nfcmrvl_spi_id_table[] = {
+	{ "nfcmrvl_spi", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, nfcmrvl_spi_id_table);
+
+static struct spi_driver nfcmrvl_spi_driver = {
+	.probe		= nfcmrvl_spi_probe,
+	.remove		= nfcmrvl_spi_remove,
+	.id_table	= nfcmrvl_spi_id_table,
+	.driver		= {
+		.name		= "nfcmrvl_spi",
+		.owner		= THIS_MODULE,
+		.of_match_table	= of_match_ptr(of_nfcmrvl_spi_match),
+	},
+};
+
+module_spi_driver(nfcmrvl_spi_driver);
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION("Marvell NFC-over-SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 61442d6..f3d041c 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -50,10 +50,21 @@
 	return nu->ops.send(nu, skb);
 }
 
+static void nfcmrvl_uart_nci_update_config(struct nfcmrvl_private *priv,
+					   const void *param)
+{
+	struct nci_uart *nu = priv->drv_data;
+	const struct nfcmrvl_fw_uart_config *config = param;
+
+	nci_uart_set_config(nu, le32_to_cpu(config->baudrate),
+			    config->flow_control);
+}
+
 static struct nfcmrvl_if_ops uart_ops = {
 	.nci_open = nfcmrvl_uart_nci_open,
 	.nci_close = nfcmrvl_uart_nci_close,
 	.nci_send = nfcmrvl_uart_nci_send,
+	.nci_update_config = nfcmrvl_uart_nci_update_config,
 };
 
 #ifdef CONFIG_OF
@@ -64,9 +75,13 @@
 	struct device_node *matched_node;
 	int ret;
 
-	matched_node = of_find_compatible_node(node, NULL, "mrvl,nfc-uart");
-	if (!matched_node)
-		return -ENODEV;
+	matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
+	if (!matched_node) {
+		matched_node = of_find_compatible_node(node, NULL,
+						       "mrvl,nfc-uart");
+		if (!matched_node)
+			return -ENODEV;
+	}
 
 	ret = nfcmrvl_parse_dt(matched_node, pdata);
 	if (ret < 0) {
@@ -127,11 +142,12 @@
 		pdata = &config;
 	}
 
-	priv = nfcmrvl_nci_register_dev(nu, &uart_ops, nu->tty->dev, pdata);
+	priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops,
+					nu->tty->dev, pdata);
 	if (IS_ERR(priv))
 		return PTR_ERR(priv);
 
-	priv->phy = NFCMRVL_PHY_UART;
+	priv->support_fw_dnld = true;
 
 	nu->drv_data = priv;
 	nu->ndev = priv->ndev;
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 7d1fe43..585a0f2 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -23,8 +23,6 @@
 #include <net/nfc/nci_core.h>
 #include "nfcmrvl.h"
 
-#define VERSION "1.0"
-
 static struct usb_device_id nfcmrvl_table[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1286, 0x2046,
 					USB_CLASS_VENDOR_SPEC, 4, 1) },
@@ -342,13 +340,14 @@
 	init_usb_anchor(&drv_data->bulk_anchor);
 	init_usb_anchor(&drv_data->deferred);
 
-	priv = nfcmrvl_nci_register_dev(drv_data, &usb_ops,
+	priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_USB, drv_data, &usb_ops,
 					&drv_data->udev->dev, &config);
 	if (IS_ERR(priv))
 		return PTR_ERR(priv);
 
 	drv_data->priv = priv;
-	drv_data->priv->phy = NFCMRVL_PHY_USB;
+	drv_data->priv->support_fw_dnld = false;
+
 	priv->dev = &drv_data->udev->dev;
 
 	usb_set_intfdata(intf, drv_data);
@@ -469,6 +468,5 @@
 module_usb_driver(nfcmrvl_usb_driver);
 
 MODULE_AUTHOR("Marvell International Ltd.");
-MODULE_DESCRIPTION("Marvell NFC-over-USB driver ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Marvell NFC-over-USB driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index 93111fa..26ac9e5 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -246,7 +246,7 @@
 }
 
 static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
-				     struct nfc_target *target)
+				     struct nfc_target *target, u8 mode)
 {
 	struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index ce2e2cf..f81e500 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -497,7 +497,7 @@
 
 static int nfcwilink_probe(struct platform_device *pdev)
 {
-	static struct nfcwilink *drv;
+	struct nfcwilink *drv;
 	int rc;
 	__u32 protocols;
 
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 8979636..2e4b004 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -109,7 +109,8 @@
 };
 
 int nxp_nci_probe(void *phy_id, struct device *pdev,
-		  struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+		  const struct nxp_nci_phy_ops *phy_ops,
+		  unsigned int max_payload,
 		  struct nci_dev **ndev)
 {
 	struct nxp_nci_info *info;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index fac80c6..df4333c 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -106,7 +106,7 @@
 	return r;
 }
 
-static struct nxp_nci_phy_ops i2c_phy_ops = {
+static const struct nxp_nci_phy_ops i2c_phy_ops = {
 	.set_mode = nxp_nci_i2c_set_mode,
 	.write = nxp_nci_i2c_write,
 };
diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h
index f1fecc4..20408cb 100644
--- a/drivers/nfc/nxp-nci/nxp-nci.h
+++ b/drivers/nfc/nxp-nci/nxp-nci.h
@@ -68,7 +68,7 @@
 
 	enum nxp_nci_mode mode;
 
-	struct nxp_nci_phy_ops *phy_ops;
+	const struct nxp_nci_phy_ops *phy_ops;
 	unsigned int max_payload;
 
 	struct mutex info_lock;
@@ -82,7 +82,8 @@
 void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result);
 
 int nxp_nci_probe(void *phy_id, struct device *pdev,
-		  struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload,
+		  const struct nxp_nci_phy_ops *phy_ops,
+		  unsigned int max_payload,
 		  struct nci_dev **ndev);
 void nxp_nci_remove(struct nci_dev *ndev);
 
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index a03e4eb..bb3d5ea 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -2263,7 +2263,7 @@
 }
 
 static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
-				    struct nfc_target *target)
+				    struct nfc_target *target, u8 mode)
 {
 	struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 	struct sk_buff *skb;
diff --git a/drivers/nfc/pn544/Kconfig b/drivers/nfc/pn544/Kconfig
index ccf06f5..2b8bde3 100644
--- a/drivers/nfc/pn544/Kconfig
+++ b/drivers/nfc/pn544/Kconfig
@@ -1,20 +1,15 @@
 config NFC_PN544
-	tristate "NXP PN544 NFC driver"
-	depends on NFC_HCI
+	tristate
 	select CRC_CCITT
-	default n
 	---help---
 	  NXP PN544 core driver.
 	  This is a driver based on the HCI NFC kernel layers and
 	  will thus not work with NXP libnfc library.
 
-	  To compile this driver as a module, choose m here. The module will
-	  be called pn544.
-	  Say N if unsure.
-
 config NFC_PN544_I2C
-	tristate "NFC PN544 i2c support"
-	depends on NFC_PN544 && I2C && NFC_SHDLC
+	tristate "NXP PN544 device support (I2C)"
+	depends on NFC_HCI && I2C && NFC_SHDLC
+	select NFC_PN544
 	---help---
 	  This module adds support for the NXP pn544 i2c interface.
 	  Select this if your platform is using the i2c bus.
@@ -23,8 +18,9 @@
 	  Say N if unsure.
 
 config NFC_PN544_MEI
-	tristate "NFC PN544 MEI support"
-	depends on NFC_PN544 && NFC_MEI_PHY
+	tristate "NXP PN544 device support (MEI)"
+	depends on NFC_HCI && NFC_MEI_PHY
+	select NFC_PN544
 	---help---
 	  This module adds support for the mei interface of adapters using
 	  NXP pn544 chipsets.  Select this if your pn544 chipset
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index 7e3b255..1eef919 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -1,5 +1,6 @@
 config NFC_S3FWRN5
 	tristate
+	select CRYPTO
 	---help---
 	  Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
 	  of chip. It's intended to be used by PHYs to avoid duplicating lots
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
index 3381c34..ddfa7be 100644
--- a/drivers/nfc/s3fwrn5/Makefile
+++ b/drivers/nfc/s3fwrn5/Makefile
@@ -7,5 +7,3 @@
 
 obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
 obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
-
-ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index b4dd7dd..c61d8a3 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -258,7 +258,7 @@
 	if (ret < 0)
 		return ret;
 
-	ret = request_threaded_irq(phy->i2c_dev->irq, NULL,
+	ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL,
 		s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
 		S3FWRN5_I2C_DRIVER_NAME, phy);
 	if (ret)
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index ace0071..075e4e8 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -31,7 +31,7 @@
 	return 0;
 }
 
-static struct nci_prop_ops s3fwrn5_nci_prop_ops[] = {
+static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
 	{
 		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
 				NCI_PROP_AGAIN),
@@ -79,7 +79,7 @@
 	},
 };
 
-void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n)
+void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n)
 {
 	*ops = s3fwrn5_nci_prop_ops;
 	*n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index 0e68d43..60c7fb5 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -83,7 +83,7 @@
 
 #define NCI_PROP_WR_RESET	0x2f
 
-void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n);
+void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n);
 int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
 
 #endif /* __LOCAL_S3FWRN5_NCI_H_ */
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
index 348ce76..439b2fa 100644
--- a/drivers/nfc/st-nci/Makefile
+++ b/drivers/nfc/st-nci/Makefile
@@ -1,8 +1,8 @@
 #
-# Makefile for ST21NFCB NCI based NFC driver
+# Makefile for ST_NCI NCI based NFC driver
 #
 
-st-nci-objs = ndlc.o core.o st-nci_se.o
+st-nci-objs = ndlc.o core.o se.o vendor_cmds.o
 obj-$(CONFIG_NFC_ST_NCI)     += st-nci.o
 
 st-nci_i2c-objs = i2c.o
diff --git a/drivers/nfc/st-nci/core.c b/drivers/nfc/st-nci/core.c
index c419d39..c693128 100644
--- a/drivers/nfc/st-nci/core.c
+++ b/drivers/nfc/st-nci/core.c
@@ -24,7 +24,6 @@
 #include <linux/delay.h>
 
 #include "st-nci.h"
-#include "st-nci_se.h"
 
 #define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
@@ -98,7 +97,7 @@
 	return 0;
 }
 
-static struct nci_prop_ops st_nci_prop_ops[] = {
+static struct nci_driver_ops st_nci_prop_ops[] = {
 	{
 		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
 					  ST_NCI_CORE_PROP),
@@ -124,7 +123,7 @@
 };
 
 int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
-		       int phy_tailroom)
+		 int phy_tailroom, struct st_nci_se_status *se_status)
 {
 	struct st_nci_info *info;
 	int r;
@@ -153,14 +152,23 @@
 
 	nci_set_drvdata(ndlc->ndev, info);
 
+	r = st_nci_vendor_cmds_init(ndlc->ndev);
+	if (r) {
+		pr_err("Cannot register proprietary vendor cmds\n");
+		goto err_reg_dev;
+	}
+
 	r = nci_register_device(ndlc->ndev);
 	if (r) {
 		pr_err("Cannot register nfc device to nci core\n");
-		nci_free_device(ndlc->ndev);
-		return r;
+		goto err_reg_dev;
 	}
 
-	return st_nci_se_init(ndlc->ndev);
+	return st_nci_se_init(ndlc->ndev, se_status);
+
+err_reg_dev:
+	nci_free_device(ndlc->ndev);
+	return r;
 }
 EXPORT_SYMBOL_GPL(st_nci_probe);
 
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 707ed2e..15e3ce2 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -27,12 +27,12 @@
 #include <linux/nfc.h>
 #include <linux/platform_data/st-nci.h>
 
-#include "ndlc.h"
+#include "st-nci.h"
 
 #define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
 /* ndlc header */
-#define ST_NCI_FRAME_HEADROOM	1
+#define ST_NCI_FRAME_HEADROOM 1
 #define ST_NCI_FRAME_TAILROOM 0
 
 #define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
@@ -50,16 +50,13 @@
 	struct i2c_client *i2c_dev;
 	struct llt_ndlc *ndlc;
 
+	bool irq_active;
+
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
-};
 
-#define I2C_DUMP_SKB(info, skb)					\
-do {								\
-	pr_debug("%s:\n", info);				\
-	print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET,	\
-		       16, 1, (skb)->data, (skb)->len, 0);	\
-} while (0)
+	struct st_nci_se_status se_status;
+};
 
 static int st_nci_i2c_enable(void *phy_id)
 {
@@ -70,8 +67,10 @@
 	gpio_set_value(phy->gpio_reset, 1);
 	usleep_range(80000, 85000);
 
-	if (phy->ndlc->powered == 0)
+	if (phy->ndlc->powered == 0 && phy->irq_active == 0) {
 		enable_irq(phy->i2c_dev->irq);
+		phy->irq_active = true;
+	}
 
 	return 0;
 }
@@ -81,6 +80,7 @@
 	struct st_nci_i2c_phy *phy = phy_id;
 
 	disable_irq_nosync(phy->i2c_dev->irq);
+	phy->irq_active = false;
 }
 
 /*
@@ -94,8 +94,6 @@
 	struct st_nci_i2c_phy *phy = phy_id;
 	struct i2c_client *client = phy->i2c_dev;
 
-	I2C_DUMP_SKB("st_nci_i2c_write", skb);
-
 	if (phy->ndlc->hard_fault != 0)
 		return phy->ndlc->hard_fault;
 
@@ -166,8 +164,6 @@
 	skb_put(*skb, len);
 	memcpy((*skb)->data + ST_NCI_I2C_MIN_SIZE, buf, len);
 
-	I2C_DUMP_SKB("i2c frame read", *skb);
-
 	return 0;
 }
 
@@ -245,6 +241,11 @@
 
 	phy->irq_polarity = irq_get_trigger_type(client->irq);
 
+	phy->se_status.is_ese_present =
+				of_property_read_bool(pp, "ese-present");
+	phy->se_status.is_uicc_present =
+				of_property_read_bool(pp, "uicc-present");
+
 	return 0;
 }
 #else
@@ -277,6 +278,9 @@
 		return r;
 	}
 
+	phy->se_status.is_ese_present = pdata->is_ese_present;
+	phy->se_status.is_uicc_present = pdata->is_uicc_present;
+
 	return 0;
 }
 
@@ -326,12 +330,13 @@
 
 	r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
 			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
-			&phy->ndlc);
+			&phy->ndlc, &phy->se_status);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to register ndlc layer\n");
 		return r;
 	}
 
+	phy->irq_active = true;
 	r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
 				st_nci_irq_thread_fn,
 				phy->irq_polarity | IRQF_ONESHOT,
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index d2cf84e..0884b11 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -19,8 +19,8 @@
 #include <linux/sched.h>
 #include <net/nfc/nci_core.h>
 
-#include "ndlc.h"
 #include "st-nci.h"
+#include "ndlc.h"
 
 #define NDLC_TIMER_T1		100
 #define NDLC_TIMER_T1_WAIT	400
@@ -266,7 +266,8 @@
 }
 
 int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
-	       int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id)
+	       int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
+	       struct st_nci_se_status *se_status)
 {
 	struct llt_ndlc *ndlc;
 
@@ -296,7 +297,7 @@
 
 	INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work);
 
-	return st_nci_probe(ndlc, phy_headroom, phy_tailroom);
+	return st_nci_probe(ndlc, phy_headroom, phy_tailroom, se_status);
 }
 EXPORT_SYMBOL(ndlc_probe);
 
diff --git a/drivers/nfc/st-nci/ndlc.h b/drivers/nfc/st-nci/ndlc.h
index 6361005..bdf78ff 100644
--- a/drivers/nfc/st-nci/ndlc.h
+++ b/drivers/nfc/st-nci/ndlc.h
@@ -22,6 +22,8 @@
 #include <linux/skbuff.h>
 #include <net/nfc/nfc.h>
 
+struct st_nci_se_status;
+
 /* Low Level Transport description */
 struct llt_ndlc {
 	struct nci_dev *ndev;
@@ -55,6 +57,7 @@
 int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb);
 void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb);
 int ndlc_probe(void *phy_id, struct nfc_phy_ops *phy_ops, struct device *dev,
-	int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id);
+	       int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id,
+	       struct st_nci_se_status *se_status);
 void ndlc_remove(struct llt_ndlc *ndlc);
 #endif /* __LOCAL_NDLC_H__ */
diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/se.c
similarity index 82%
rename from drivers/nfc/st-nci/st-nci_se.c
rename to drivers/nfc/st-nci/se.c
index c742ef6..dbab722 100644
--- a/drivers/nfc/st-nci/st-nci_se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -23,7 +23,6 @@
 #include <net/nfc/nci_core.h>
 
 #include "st-nci.h"
-#include "st-nci_se.h"
 
 struct st_nci_pipe_info {
 	u8 pipe_state;
@@ -40,7 +39,6 @@
 #define ST_NCI_ESE_HOST_ID            0xc0
 
 /* Gates */
-#define ST_NCI_DEVICE_MGNT_GATE       0x01
 #define ST_NCI_APDU_READER_GATE       0xf0
 #define ST_NCI_CONNECTIVITY_GATE      0x41
 
@@ -64,7 +62,7 @@
 
 #define ST_NCI_EVT_SE_HARD_RESET		0x20
 #define ST_NCI_EVT_TRANSMIT_DATA		0x10
-#define ST_NCI_EVT_WTX_REQUEST		0x11
+#define ST_NCI_EVT_WTX_REQUEST			0x11
 #define ST_NCI_EVT_SE_SOFT_RESET		0x11
 #define ST_NCI_EVT_SE_END_OF_APDU_TRANSFER	0x21
 #define ST_NCI_EVT_HOT_PLUG			0x03
@@ -113,6 +111,11 @@
 	{ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DEVICE_MGNT_PIPE,
 					ST_NCI_HOST_CONTROLLER_ID},
 
+	{NCI_HCI_IDENTITY_MGMT_GATE, NCI_HCI_INVALID_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+	{NCI_HCI_LOOPBACK_GATE, NCI_HCI_INVALID_PIPE,
+					ST_NCI_HOST_CONTROLLER_ID},
+
 	/* Secure element pipes are created by secure element host */
 	{ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
 					ST_NCI_HOST_CONTROLLER_ID},
@@ -226,27 +229,32 @@
 			continue;
 		}
 
-		for (j = 0; (j < ARRAY_SIZE(st_nci_gates)) &&
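+		/* Skip the first 3 entries: those gates use well-known pipes (see below) */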
+		for (j = 3; (j < ARRAY_SIZE(st_nci_gates)) &&
 		     (st_nci_gates[j].gate != dm_pipe_info->dst_gate_id); j++)
 			;
 
 		if (j < ARRAY_SIZE(st_nci_gates) &&
 		    st_nci_gates[j].gate == dm_pipe_info->dst_gate_id &&
 		    ST_NCI_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) {
-			st_nci_gates[j].pipe = pipe_info[2];
+			ndev->hci_dev->init_data.gates[j].pipe = pipe_info[2];
 
 			ndev->hci_dev->gate2pipe[st_nci_gates[j].gate] =
-						st_nci_gates[j].pipe;
-			ndev->hci_dev->pipes[st_nci_gates[j].pipe].gate =
+						pipe_info[2];
+			ndev->hci_dev->pipes[pipe_info[2]].gate =
 						st_nci_gates[j].gate;
-			ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
+			ndev->hci_dev->pipes[pipe_info[2]].host =
 						dm_pipe_info->src_host_id;
 		}
 		kfree_skb(skb_pipe_info);
 	}
 
-	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
-	       sizeof(st_nci_gates));
+	/*
+	 * 3 gates have a well-known pipe ID. Only NCI_HCI_LINK_MGMT_GATE
+	 * is not yet open at this stage.
+	 */
+	r = nci_hci_connect_gate(ndev, ST_NCI_HOST_CONTROLLER_ID,
+				 NCI_HCI_LINK_MGMT_GATE,
+				 NCI_HCI_LINK_MGMT_PIPE);
 
 	kfree_skb(skb_pipe_list);
 	return r;
@@ -272,6 +280,8 @@
 			}
 		}
 	break;
+	default:
+		nfc_err(&ndev->nfc_dev->dev, "Unexpected event on admin gate\n");
 	}
 }
 
@@ -295,6 +305,9 @@
 		mod_timer(&info->se_info.bwi_timer, jiffies +
 			  msecs_to_jiffies(info->se_info.wt_timeout));
 	break;
+	default:
+		nfc_err(&ndev->nfc_dev->dev, "Unexpected event on apdu reader gate\n");
+		return 1;
 	}
 
 	kfree_skb(skb);
@@ -349,6 +362,7 @@
 		r = nfc_se_transaction(ndev->nfc_dev, host, transaction);
 		break;
 	default:
+		nfc_err(&ndev->nfc_dev->dev, "Unexpected event on connectivity gate\n");
 		return 1;
 	}
 	kfree_skb(skb);
@@ -369,8 +383,10 @@
 		st_nci_hci_apdu_reader_event_received(ndev, event, skb);
 	break;
 	case ST_NCI_CONNECTIVITY_GATE:
-		st_nci_hci_connectivity_event_received(ndev, host, event,
-							 skb);
+		st_nci_hci_connectivity_event_received(ndev, host, event, skb);
+	break;
+	case NCI_HCI_LOOPBACK_GATE:
+		st_nci_hci_loopback_event_received(ndev, event, skb);
 	break;
 	}
 }
@@ -403,15 +419,11 @@
 }
 EXPORT_SYMBOL_GPL(st_nci_hci_cmd_received);
 
-/*
- * Remarks: On some early st_nci firmware, nci_nfcee_mode_set(0)
- * is rejected
- */
 static int st_nci_control_se(struct nci_dev *ndev, u8 se_idx,
-				   u8 state)
+			     u8 state)
 {
 	struct st_nci_info *info = nci_get_drvdata(ndev);
-	int r;
+	int r, i;
 	struct sk_buff *sk_host_list;
 	u8 host_id;
 
@@ -433,7 +445,7 @@
 	 * retrieve a relevant host list.
 	 */
 	reinit_completion(&info->se_info.req_completion);
-	r = nci_nfcee_mode_set(ndev, se_idx, NCI_NFCEE_ENABLE);
+	r = nci_nfcee_mode_set(ndev, se_idx, state);
 	if (r != NCI_STATUS_OK)
 		return r;
 
@@ -449,14 +461,19 @@
 	 * There is no possible synchronization to prevent this.
 	 * Adding a small delay is the only way to solve the issue.
 	 */
-	usleep_range(3000, 5000);
+	if (info->se_info.se_status->is_ese_present &&
+	    info->se_info.se_status->is_uicc_present)
+		usleep_range(15000, 20000);
 
 	r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE,
 			NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list);
 	if (r != NCI_HCI_ANY_OK)
 		return r;
 
-	host_id = sk_host_list->data[sk_host_list->len - 1];
+	for (i = 0; i < sk_host_list->len &&
+		sk_host_list->data[i] != se_idx; i++)
+		;
+	host_id = sk_host_list->data[i];
 	kfree_skb(sk_host_list);
 	if (state == ST_NCI_SE_MODE_ON && host_id == se_idx)
 		return se_idx;
@@ -472,11 +489,20 @@
 
 	pr_debug("st_nci_disable_se\n");
 
-	if (se_idx == NFC_SE_EMBEDDED) {
-		r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
-				ST_NCI_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0);
-		if (r < 0)
-			return r;
+	/*
+	 * According to the upper layer, se_idx == NFC_SE_UICC when
+	 * info->se_info.se_status->is_uicc_present is false should never
+	 * happen. Same for the eSE.
+	 */
+	r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_OFF);
+	if (r < 0) {
+		/* Do best effort to release SWP */
+		if (se_idx == NFC_SE_EMBEDDED) {
+			r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
+					ST_NCI_EVT_SE_END_OF_APDU_TRANSFER,
+					NULL, 0);
+		}
+		return r;
 	}
 
 	return 0;
@@ -489,11 +515,25 @@
 
 	pr_debug("st_nci_enable_se\n");
 
-	if (se_idx == ST_NCI_HCI_HOST_ID_ESE) {
+	/*
+	 * According to the upper layer, se_idx == NFC_SE_UICC when
+	 * info->se_info.se_status->is_uicc_present is false should never
+	 * happen. Same for the eSE.
+	 */
+	r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_ON);
+	if (r == ST_NCI_HCI_HOST_ID_ESE) {
+		st_nci_se_get_atr(ndev);
 		r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
 				ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
-		if (r < 0)
-			return r;
+	}
+
+	if (r < 0) {
+		/*
+		 * The activation procedure failed, the secure element
+		 * is not connected. Remove from the list.
+		 */
+		nfc_remove_se(ndev->nfc_dev, se_idx);
+		return r;
 	}
 
 	return 0;
@@ -502,6 +542,7 @@
 
 static int st_nci_hci_network_init(struct nci_dev *ndev)
 {
+	struct st_nci_info *info = nci_get_drvdata(ndev);
 	struct core_conn_create_dest_spec_params *dest_params;
 	struct dest_spec_params spec_params;
 	struct nci_conn_info    *conn_info;
@@ -532,6 +573,7 @@
 	if (!conn_info)
 		goto free_dest_params;
 
+	ndev->hci_dev->init_data.gate_count = ARRAY_SIZE(st_nci_gates);
 	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
 	       sizeof(st_nci_gates));
 
@@ -553,10 +595,17 @@
 	if (r != NCI_HCI_ANY_OK)
 		goto free_dest_params;
 
-	r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
-			       NCI_NFCEE_ENABLE);
-	if (r != NCI_STATUS_OK)
-		goto free_dest_params;
+	/*
+	 * In factory mode, we prevent secure elements activation
+	 * by disabling nfcee on the current HCI connection id.
+	 * HCI will be used here only for proprietary commands.
+	 */
+	if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
+		r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+				       NCI_NFCEE_DISABLE);
+	else
+		r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+				       NCI_NFCEE_ENABLE);
 
 free_dest_params:
 	kfree(dest_params);
@@ -567,9 +616,10 @@
 
 int st_nci_discover_se(struct nci_dev *ndev)
 {
-	u8 param[2];
-	int r;
+	u8 white_list[2];
+	int r, wl_size = 0;
 	int se_count = 0;
+	struct st_nci_info *info = nci_get_drvdata(ndev);
 
 	pr_debug("st_nci_discover_se\n");
 
@@ -577,29 +627,37 @@
 	if (r != 0)
 		return r;
 
-	param[0] = ST_NCI_UICC_HOST_ID;
-	param[1] = ST_NCI_HCI_HOST_ID_ESE;
-	r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
-				NCI_HCI_ADMIN_PARAM_WHITELIST,
-				param, sizeof(param));
-	if (r != NCI_HCI_ANY_OK)
-		return r;
+	if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
+		return 0;
 
-	r = st_nci_control_se(ndev, ST_NCI_UICC_HOST_ID,
-				ST_NCI_SE_MODE_ON);
-	if (r == ST_NCI_UICC_HOST_ID) {
+	if (info->se_info.se_status->is_ese_present &&
+	    info->se_info.se_status->is_uicc_present) {
+		white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
+		white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
+	} else if (!info->se_info.se_status->is_ese_present &&
+		   info->se_info.se_status->is_uicc_present) {
+		white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
+	} else if (info->se_info.se_status->is_ese_present &&
+		   !info->se_info.se_status->is_uicc_present) {
+		white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
+	}
+
+	if (wl_size) {
+		r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
+				      NCI_HCI_ADMIN_PARAM_WHITELIST,
+				      white_list, wl_size);
+		if (r != NCI_HCI_ANY_OK)
+			return r;
+	}
+
+	if (info->se_info.se_status->is_uicc_present) {
 		nfc_add_se(ndev->nfc_dev, ST_NCI_UICC_HOST_ID, NFC_SE_UICC);
 		se_count++;
 	}
 
-	/* Try to enable eSE in order to check availability */
-	r = st_nci_control_se(ndev, ST_NCI_HCI_HOST_ID_ESE,
-				ST_NCI_SE_MODE_ON);
-	if (r == ST_NCI_HCI_HOST_ID_ESE) {
-		nfc_add_se(ndev->nfc_dev, ST_NCI_HCI_HOST_ID_ESE,
-			   NFC_SE_EMBEDDED);
+	if (info->se_info.se_status->is_ese_present) {
+		nfc_add_se(ndev->nfc_dev, ST_NCI_ESE_HOST_ID, NFC_SE_EMBEDDED);
 		se_count++;
-		st_nci_se_get_atr(ndev);
 	}
 
 	return !se_count;
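The white-list construction above spells out every present/absent combination. Logically it reduces to two independent checks, since each host ID is appended exactly when its secure element is present; a minimal standalone sketch of that reduction (the UICC host-ID value is assumed for illustration, the eSE value mirrors ST_NCI_HCI_HOST_ID_ESE from this patch):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ST_NCI_UICC_HOST_ID	0x02	/* assumed value, illustration only */
	#define ST_NCI_ESE_HOST_ID	0xc0	/* mirrors ST_NCI_HCI_HOST_ID_ESE */

	int main(void)
	{
		bool is_uicc_present = true, is_ese_present = true;
		uint8_t white_list[2];
		int wl_size = 0;

		/* Two independent checks cover all four combinations and
		 * preserve the UICC-first ordering of the original code. */
		if (is_uicc_present)
			white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
		if (is_ese_present)
			white_list[wl_size++] = ST_NCI_ESE_HOST_ID;

		printf("wl_size=%d first=0x%02x\n", wl_size,
		       wl_size ? white_list[0] : 0);
		return 0;
	}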
@@ -672,7 +730,7 @@
 	complete(&info->se_info.req_completion);
 }
 
-int st_nci_se_init(struct nci_dev *ndev)
+int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status)
 {
 	struct st_nci_info *info = nci_get_drvdata(ndev);
 
@@ -694,6 +752,8 @@
 	info->se_info.wt_timeout =
 		ST_NCI_BWI_TO_TIMEOUT(ST_NCI_ATR_DEFAULT_BWI);
 
+	info->se_info.se_status = se_status;
+
 	return 0;
 }
 EXPORT_SYMBOL(st_nci_se_init);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 598a58c..cf7ad81 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -25,9 +25,10 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
+#include <net/nfc/nci.h>
 #include <linux/platform_data/st-nci.h>
 
-#include "ndlc.h"
+#include "st-nci.h"
 
 #define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
@@ -50,16 +51,13 @@
 	struct spi_device *spi_dev;
 	struct llt_ndlc *ndlc;
 
+	bool irq_active;
+
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
-};
 
-#define SPI_DUMP_SKB(info, skb)					\
-do {								\
-	pr_debug("%s:\n", info);				\
-	print_hex_dump(KERN_DEBUG, "spi: ", DUMP_PREFIX_OFFSET,	\
-		       16, 1, (skb)->data, (skb)->len, 0);	\
-} while (0)
+	struct st_nci_se_status se_status;
+};
 
 static int st_nci_spi_enable(void *phy_id)
 {
@@ -70,8 +68,10 @@
 	gpio_set_value(phy->gpio_reset, 1);
 	usleep_range(80000, 85000);
 
-	if (phy->ndlc->powered == 0)
+	if (phy->ndlc->powered == 0 && phy->irq_active == 0) {
 		enable_irq(phy->spi_dev->irq);
+		phy->irq_active = true;
+	}
 
 	return 0;
 }
@@ -81,6 +81,7 @@
 	struct st_nci_spi_phy *phy = phy_id;
 
 	disable_irq_nosync(phy->spi_dev->irq);
+	phy->irq_active = false;
 }
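The new irq_active flag tracks whether the SPI IRQ line is currently enabled: disable_irq() nests safely in the kernel, but a second enable_irq() without an intervening disable triggers an "unbalanced enable" warning, so only the enable side needs the guard. A toy userspace model of the bookkeeping, with stubbed IRQ helpers (illustrative only; the driver additionally checks ndlc->powered):

	#include <stdbool.h>
	#include <stdio.h>

	static int irq_depth;			/* 0 means the line is enabled */

	static void enable_irq(void)
	{
		if (irq_depth == 0)
			puts("WARNING: unbalanced enable");	/* what the flag avoids */
		else
			irq_depth--;
	}

	static void disable_irq(void)
	{
		irq_depth++;			/* disabling nests safely */
	}

	static bool irq_active = true;		/* mirrors phy->irq_active after probe */

	static void phy_enable(void)
	{
		if (!irq_active) {		/* guard the enable side, as in the patch */
			enable_irq();
			irq_active = true;
		}
	}

	static void phy_disable(void)
	{
		disable_irq();
		irq_active = false;
	}

	int main(void)
	{
		phy_enable();		/* no-op: already active, so no warning */
		phy_disable();
		phy_enable();		/* re-enables exactly once */
		printf("depth=%d\n", irq_depth);	/* 0: balanced again */
		return 0;
	}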
 
 /*
@@ -94,15 +95,14 @@
 	struct st_nci_spi_phy *phy = phy_id;
 	struct spi_device *dev = phy->spi_dev;
 	struct sk_buff *skb_rx;
-	u8 buf[ST_NCI_SPI_MAX_SIZE];
+	u8 buf[ST_NCI_SPI_MAX_SIZE + NCI_DATA_HDR_SIZE +
+	       ST_NCI_FRAME_HEADROOM + ST_NCI_FRAME_TAILROOM];
 	struct spi_transfer spi_xfer = {
 		.tx_buf = skb->data,
 		.rx_buf = buf,
 		.len = skb->len,
 	};
 
-	SPI_DUMP_SKB("st_nci_spi_write", skb);
-
 	if (phy->ndlc->hard_fault != 0)
 		return phy->ndlc->hard_fault;
 
@@ -179,8 +179,6 @@
 	skb_put(*skb, len);
 	memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
 
-	SPI_DUMP_SKB("spi frame read", *skb);
-
 	return 0;
 }
 
@@ -258,6 +256,11 @@
 
 	phy->irq_polarity = irq_get_trigger_type(dev->irq);
 
+	phy->se_status.is_ese_present =
+				of_property_read_bool(pp, "ese-present");
+	phy->se_status.is_uicc_present =
+				of_property_read_bool(pp, "uicc-present");
+
 	return 0;
 }
 #else
@@ -290,6 +293,9 @@
 		return r;
 	}
 
+	phy->se_status.is_ese_present = pdata->is_ese_present;
+	phy->se_status.is_uicc_present = pdata->is_uicc_present;
+
 	return 0;
 }
 
@@ -340,12 +346,13 @@
 
 	r = ndlc_probe(phy, &spi_phy_ops, &dev->dev,
 			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
-			&phy->ndlc);
+			&phy->ndlc, &phy->se_status);
 	if (r < 0) {
 		nfc_err(&dev->dev, "Unable to register ndlc layer\n");
 		return r;
 	}
 
+	phy->irq_active = true;
 	r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL,
 				st_nci_irq_thread_fn,
 				phy->irq_polarity | IRQF_ONESHOT,
diff --git a/drivers/nfc/st-nci/st-nci.h b/drivers/nfc/st-nci/st-nci.h
index 850a239..8b9f77b 100644
--- a/drivers/nfc/st-nci/st-nci.h
+++ b/drivers/nfc/st-nci/st-nci.h
@@ -19,7 +19,6 @@
 #ifndef __LOCAL_ST_NCI_H_
 #define __LOCAL_ST_NCI_H_
 
-#include "st-nci_se.h"
 #include "ndlc.h"
 
 /* Define private flags: */
@@ -28,6 +27,18 @@
 #define ST_NCI_CORE_PROP                0x01
 #define ST_NCI_SET_NFC_MODE             0x02
 
+/*
+ * Ref: ISO/IEC 7816-3, chapter 8.1. The initial character TS is followed
+ * by a sequence of at most 32 characters.
+ */
+#define ST_NCI_ESE_MAX_LENGTH  33
+#define ST_NCI_HCI_HOST_ID_ESE 0xc0
+
+#define ST_NCI_DEVICE_MGNT_GATE		0x01
+
+#define ST_NCI_VENDOR_OUI 0x0080E1 /* STMicroelectronics */
+#define ST_NCI_FACTORY_MODE 2
+
 struct nci_mode_set_cmd {
 	u8 cmd_type;
 	u8 mode;
@@ -37,14 +48,116 @@
 	u8 status;
 } __packed;
 
+struct st_nci_se_status {
+	bool is_ese_present;
+	bool is_uicc_present;
+};
+
+struct st_nci_se_info {
+	struct st_nci_se_status *se_status;
+	u8 atr[ST_NCI_ESE_MAX_LENGTH];
+	struct completion req_completion;
+
+	struct timer_list bwi_timer;
+	int wt_timeout; /* in msecs */
+	bool bwi_active;
+
+	struct timer_list se_active_timer;
+	bool se_active;
+
+	bool xch_error;
+
+	se_io_cb_t cb;
+	void *cb_context;
+};
+
+/**
+ * enum nfc_vendor_cmds - supported nfc vendor commands
+ *
+ * @FACTORY_MODE: Allows setting the driver into a mode where no secure
+ *	elements are activated. It does not consider any NFC_ATTR_VENDOR_DATA.
+ * @HCI_CLEAR_ALL_PIPES: Allows executing an HCI clear-all-pipes command.
+ *	It does not consider any NFC_ATTR_VENDOR_DATA.
+ * @HCI_DM_PUT_DATA: Allows configuring specific CLF registers, for example
+ *	RF trimmings or low-level driver configurations (I2C, SPI, SWP).
+ * @HCI_DM_UPDATE_AID: Allows configuring an AID route in the CLF routing
+ *	table according to RF technology, CLF mode or protocol.
+ * @HCI_DM_GET_INFO: Allows retrieving CLF information.
+ * @HCI_DM_GET_DATA: Allows retrieving configurable CLF data such as
+ *	low-level driver configurations or RF trimmings.
+ * @HCI_DM_DIRECT_LOAD: Allows loading firmware into the CLF. A complete
+ *	packet can be more than 8KB.
+ * @HCI_DM_RESET: Allows running a CLF reset in order to "commit" CLF
+ *	configuration changes without powering the CLF off.
+ * @HCI_GET_PARAM: Allows retrieving an HCI CLF parameter (for example the
+ *	white list).
+ * @HCI_DM_FIELD_GENERATOR: Allows generating different kinds of RF fields.
+ *	No anti-collision is done when using this command.
+ * @HCI_LOOPBACK: Allows echoing a command to test the DH-to-CLF
+ *	connectivity.
+ * @HCI_DM_VDC_MEASUREMENT_VALUE: Allows measuring the field applied on the
+ *	CLF antenna. A value between 0 and 0x0f is returned; 0 is the maximum.
+ * @HCI_DM_FWUPD_START: Allows putting the CLF into firmware update mode.
+ *	This is a dedicated CLF command as there is no GPIO for it.
+ * @HCI_DM_FWUPD_END: Allows completing a firmware update.
+ * @HCI_DM_VDC_VALUE_COMPARISON: Allows comparing the field applied on the
+ *	CLF antenna against a reference value.
+ * @MANUFACTURER_SPECIFIC: Allows retrieving manufacturer-specific data
+ *	received during an NCI_CORE_INIT_CMD.
+ */
+enum nfc_vendor_cmds {
+	FACTORY_MODE,
+	HCI_CLEAR_ALL_PIPES,
+	HCI_DM_PUT_DATA,
+	HCI_DM_UPDATE_AID,
+	HCI_DM_GET_INFO,
+	HCI_DM_GET_DATA,
+	HCI_DM_DIRECT_LOAD,
+	HCI_DM_RESET,
+	HCI_GET_PARAM,
+	HCI_DM_FIELD_GENERATOR,
+	HCI_LOOPBACK,
+	HCI_DM_FWUPD_START,
+	HCI_DM_FWUPD_END,
+	HCI_DM_VDC_MEASUREMENT_VALUE,
+	HCI_DM_VDC_VALUE_COMPARISON,
+	MANUFACTURER_SPECIFIC,
+};
+
+struct st_nci_vendor_info {
+	struct completion req_completion;
+	struct sk_buff *rx_skb;
+};
+
 struct st_nci_info {
 	struct llt_ndlc *ndlc;
 	unsigned long flags;
+
 	struct st_nci_se_info se_info;
+	struct st_nci_vendor_info vendor_info;
 };
 
 void st_nci_remove(struct nci_dev *ndev);
 int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom,
-		int phy_tailroom);
+		 int phy_tailroom, struct st_nci_se_status *se_status);
+
+int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status);
+void st_nci_se_deinit(struct nci_dev *ndev);
+
+int st_nci_discover_se(struct nci_dev *ndev);
+int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
+int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
+				u8 *apdu, size_t apdu_length,
+				se_io_cb_t cb, void *cb_context);
+int st_nci_hci_load_session(struct nci_dev *ndev);
+void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
+					u8 event, struct sk_buff *skb);
+void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
+						struct sk_buff *skb);
+
+void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
+					struct sk_buff *skb);
+int st_nci_vendor_cmds_init(struct nci_dev *ndev);
 
 #endif /* __LOCAL_ST_NCI_H_ */
diff --git a/drivers/nfc/st-nci/st-nci_se.h b/drivers/nfc/st-nci/st-nci_se.h
deleted file mode 100644
index ea66e87..0000000
--- a/drivers/nfc/st-nci/st-nci_se.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Secure Element Driver for STMicroelectronics NFC NCI Chip
- *
- * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __LOCAL_ST_NCI_SE_H_
-#define __LOCAL_ST_NCI_SE_H_
-
-/*
- * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
- * sequence of at most 32 characters.
- */
-#define ST_NCI_ESE_MAX_LENGTH	33
-#define ST_NCI_HCI_HOST_ID_ESE	0xc0
-
-struct st_nci_se_info {
-	u8 atr[ST_NCI_ESE_MAX_LENGTH];
-	struct completion req_completion;
-
-	struct timer_list bwi_timer;
-	int wt_timeout; /* in msecs */
-	bool bwi_active;
-
-	struct timer_list se_active_timer;
-	bool se_active;
-
-	bool xch_error;
-
-	se_io_cb_t cb;
-	void *cb_context;
-};
-
-int st_nci_se_init(struct nci_dev *ndev);
-void st_nci_se_deinit(struct nci_dev *ndev);
-
-int st_nci_discover_se(struct nci_dev *ndev);
-int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx);
-int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx);
-int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
-		u8 *apdu, size_t apdu_length,
-		se_io_cb_t cb, void *cb_context);
-int st_nci_hci_load_session(struct nci_dev *ndev);
-void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
-			u8 event, struct sk_buff *skb);
-void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
-			struct sk_buff *skb);
-
-
-#endif /* __LOCAL_ST_NCI_SE_H_ */
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
new file mode 100644
index 0000000..b5debce
--- /dev/null
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -0,0 +1,516 @@
+/*
+ * Proprietary commands extension for STMicroelectronics NFC NCI Chip
+ *
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/genetlink.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/delay.h>
+#include <net/nfc/nci_core.h>
+
+#include "st-nci.h"
+
+#define ST_NCI_HCI_DM_GETDATA			0x10
+#define ST_NCI_HCI_DM_PUTDATA			0x11
+#define ST_NCI_HCI_DM_LOAD			0x12
+#define ST_NCI_HCI_DM_GETINFO			0x13
+#define ST_NCI_HCI_DM_FWUPD_START		0x14
+#define ST_NCI_HCI_DM_FWUPD_STOP		0x15
+#define ST_NCI_HCI_DM_UPDATE_AID		0x20
+#define ST_NCI_HCI_DM_RESET			0x3e
+
+#define ST_NCI_HCI_DM_FIELD_GENERATOR		0x32
+#define ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE	0x33
+#define ST_NCI_HCI_DM_VDC_VALUE_COMPARISON	0x34
+
+#define ST_NCI_FACTORY_MODE_ON			1
+#define ST_NCI_FACTORY_MODE_OFF			0
+
+#define ST_NCI_EVT_POST_DATA			0x02
+
+struct get_param_data {
+	u8 gate;
+	u8 data;
+} __packed;
+
+static int st_nci_factory_mode(struct nfc_dev *dev, void *data,
+			       size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	if (data_len != 1)
+		return -EINVAL;
+
+	pr_debug("factory mode: %x\n", ((u8 *)data)[0]);
+
+	switch (((u8 *)data)[0]) {
+	case ST_NCI_FACTORY_MODE_ON:
+		test_and_set_bit(ST_NCI_FACTORY_MODE, &info->flags);
+		break;
+	case ST_NCI_FACTORY_MODE_OFF:
+		clear_bit(ST_NCI_FACTORY_MODE, &info->flags);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int st_nci_hci_clear_all_pipes(struct nfc_dev *dev, void *data,
+				      size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	return nci_hci_clear_all_pipes(ndev);
+}
+
+static int st_nci_hci_dm_put_data(struct nfc_dev *dev, void *data,
+				  size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+				ST_NCI_HCI_DM_PUTDATA, data,
+				data_len, NULL);
+}
+
+static int st_nci_hci_dm_update_aid(struct nfc_dev *dev, void *data,
+				    size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			ST_NCI_HCI_DM_UPDATE_AID, data, data_len, NULL);
+}
+
+static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data,
+				  size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETINFO,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					     HCI_DM_GET_INFO, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data,
+				  size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETDATA,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					     HCI_DM_GET_DATA, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st_nci_hci_dm_fwupd_start(struct nfc_dev *dev, void *data,
+				     size_t data_len)
+{
+	int r;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	dev->fw_download_in_progress = true;
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			ST_NCI_HCI_DM_FWUPD_START, data, data_len, NULL);
+	if (r)
+		dev->fw_download_in_progress = false;
+
+	return r;
+}
+
+static int st_nci_hci_dm_fwupd_end(struct nfc_dev *dev, void *data,
+				   size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			ST_NCI_HCI_DM_FWUPD_STOP, data, data_len, NULL);
+}
+
+static int st_nci_hci_dm_direct_load(struct nfc_dev *dev, void *data,
+				     size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	if (dev->fw_download_in_progress) {
+		dev->fw_download_in_progress = false;
+		return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+				ST_NCI_HCI_DM_LOAD, data, data_len, NULL);
+	}
+	return -EPROTO;
+}
+
+static int st_nci_hci_dm_reset(struct nfc_dev *dev, void *data,
+			       size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			ST_NCI_HCI_DM_RESET, data, data_len, NULL);
+	msleep(200);
+
+	return 0;
+}
+
+static int st_nci_hci_get_param(struct nfc_dev *dev, void *data,
+				size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+	struct get_param_data *param = (struct get_param_data *)data;
+
+	if (data_len < sizeof(struct get_param_data))
+		return -EPROTO;
+
+	r = nci_hci_get_param(ndev, param->gate, param->data, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					     HCI_GET_PARAM, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st_nci_hci_dm_field_generator(struct nfc_dev *dev, void *data,
+					 size_t data_len)
+{
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+				ST_NCI_HCI_DM_FIELD_GENERATOR, data, data_len, NULL);
+}
+
+static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data,
+					       size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	if (data_len != 4)
+		return -EPROTO;
+
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			     ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+				HCI_DM_VDC_MEASUREMENT_VALUE, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data,
+					      size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	if (data_len != 2)
+		return -EPROTO;
+
+	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
+			     ST_NCI_HCI_DM_VDC_VALUE_COMPARISON,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					HCI_DM_VDC_VALUE_COMPARISON, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
+					struct sk_buff *skb)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	switch (event) {
+	case ST_NCI_EVT_POST_DATA:
+		info->vendor_info.rx_skb = skb;
+		break;
+	default:
+		nfc_err(&ndev->nfc_dev->dev, "Unexpected event on loopback gate\n");
+	}
+	complete(&info->vendor_info.req_completion);
+}
+EXPORT_SYMBOL(st_nci_hci_loopback_event_received);
+
+static int st_nci_hci_loopback(struct nfc_dev *dev, void *data,
+			       size_t data_len)
+{
+	int r;
+	struct sk_buff *msg;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	if (data_len <= 0)
+		return -EPROTO;
+
+	reinit_completion(&info->vendor_info.req_completion);
+	info->vendor_info.rx_skb = NULL;
+
+	r = nci_hci_send_event(ndev, NCI_HCI_LOOPBACK_GATE,
+			       ST_NCI_EVT_POST_DATA, data, data_len);
+	if (r != data_len) {
+		r = -EPROTO;
+		goto exit;
+	}
+
+	wait_for_completion_interruptible(&info->vendor_info.req_completion);
+
+	if (!info->vendor_info.rx_skb ||
+	    info->vendor_info.rx_skb->len != data_len) {
+		r = -EPROTO;
+		goto exit;
+	}
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(ndev->nfc_dev,
+					ST_NCI_VENDOR_OUI,
+					HCI_LOOPBACK,
+					info->vendor_info.rx_skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len,
+		    info->vendor_info.rx_skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+free_skb:
+	kfree_skb(info->vendor_info.rx_skb);
+exit:
+	return r;
+}
+
+static int st_nci_manufacturer_specific(struct nfc_dev *dev, void *data,
+					size_t data_len)
+{
+	struct sk_buff *msg;
+	struct nci_dev *ndev = nfc_get_drvdata(dev);
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+					MANUFACTURER_SPECIFIC,
+					sizeof(ndev->manufact_specific_info));
+	if (!msg)
+		return -ENOMEM;
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, sizeof(ndev->manufact_specific_info),
+		    &ndev->manufact_specific_info)) {
+		kfree_skb(msg);
+		return -ENOBUFS;
+	}
+
+	return nfc_vendor_cmd_reply(msg);
+}
+
+static struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = FACTORY_MODE,
+		.doit = st_nci_factory_mode,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_CLEAR_ALL_PIPES,
+		.doit = st_nci_hci_clear_all_pipes,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_PUT_DATA,
+		.doit = st_nci_hci_dm_put_data,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_UPDATE_AID,
+		.doit = st_nci_hci_dm_update_aid,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_GET_INFO,
+		.doit = st_nci_hci_dm_get_info,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_GET_DATA,
+		.doit = st_nci_hci_dm_get_data,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_DIRECT_LOAD,
+		.doit = st_nci_hci_dm_direct_load,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_RESET,
+		.doit = st_nci_hci_dm_reset,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_GET_PARAM,
+		.doit = st_nci_hci_get_param,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_FIELD_GENERATOR,
+		.doit = st_nci_hci_dm_field_generator,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_FWUPD_START,
+		.doit = st_nci_hci_dm_fwupd_start,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_FWUPD_END,
+		.doit = st_nci_hci_dm_fwupd_end,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_LOOPBACK,
+		.doit = st_nci_hci_loopback,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_VDC_MEASUREMENT_VALUE,
+		.doit = st_nci_hci_dm_vdc_measurement_value,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = HCI_DM_VDC_VALUE_COMPARISON,
+		.doit = st_nci_hci_dm_vdc_value_comparison,
+	},
+	{
+		.vendor_id = ST_NCI_VENDOR_OUI,
+		.subcmd = MANUFACTURER_SPECIFIC,
+		.doit = st_nci_manufacturer_specific,
+	},
+};
+
+int st_nci_vendor_cmds_init(struct nci_dev *ndev)
+{
+	struct st_nci_info *info = nci_get_drvdata(ndev);
+
+	init_completion(&info->vendor_info.req_completion);
+	return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds,
+				   sizeof(st_nci_vendor_cmds));
+}
+EXPORT_SYMBOL(st_nci_vendor_cmds_init);
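Several handlers in this file (DM_GET_INFO, DM_GET_DATA, GET_PARAM and the two VDC commands) repeat the same reply sequence: allocate a vendor reply skb, attach the payload with nla_put(), send it, free the command response. A hypothetical helper factoring that out, shown as a sketch only (it assumes the same includes as vendor_cmds.c and is not part of this patch):

	/*
	 * Hypothetical helper, illustration only: shared vendor reply path.
	 * Consumes @skb in all cases, like the open-coded paths above.
	 */
	static int st_nci_vendor_reply(struct nfc_dev *dev, int subcmd,
				       struct sk_buff *skb)
	{
		struct sk_buff *msg;
		int r;

		msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
						     subcmd, skb->len);
		if (!msg) {
			r = -ENOMEM;
			goto free_skb;
		}

		if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
			kfree_skb(msg);
			r = -ENOBUFS;
			goto free_skb;
		}

		r = nfc_vendor_cmd_reply(msg);

	free_skb:
		kfree_skb(skb);
		return r;
	}

With such a helper, st_nci_hci_dm_get_info() would reduce to its nci_hci_send_cmd() call followed by return st_nci_vendor_reply(dev, HCI_DM_GET_INFO, skb);.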
diff --git a/drivers/nfc/st21nfca/Makefile b/drivers/nfc/st21nfca/Makefile
index 97edab4..ded6489 100644
--- a/drivers/nfc/st21nfca/Makefile
+++ b/drivers/nfc/st21nfca/Makefile
@@ -2,7 +2,7 @@
 # Makefile for ST21NFCA HCI based NFC driver
 #
 
-st21nfca_hci-objs = st21nfca.o st21nfca_dep.o st21nfca_se.o
+st21nfca_hci-objs = core.o dep.o se.o vendor_cmds.o
 obj-$(CONFIG_NFC_ST21NFCA)     += st21nfca_hci.o
 
 st21nfca_i2c-objs  = i2c.o
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/core.c
similarity index 96%
rename from drivers/nfc/st21nfca/st21nfca.c
rename to drivers/nfc/st21nfca/core.c
index 0512865..dd8b150 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -22,8 +22,6 @@
 #include <net/nfc/llc.h>
 
 #include "st21nfca.h"
-#include "st21nfca_dep.h"
-#include "st21nfca_se.h"
 
 #define DRIVER_DESC "HCI NFC driver for ST21NFCA"
 
@@ -87,12 +85,13 @@
 
 static struct nfc_hci_gate st21nfca_gates[] = {
 	{NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE},
+	{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
+	{ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
+
 	{NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
 	{NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
-	{NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE},
 	{NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
 	{NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
-	{ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE},
 	{ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
 	{ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE},
 	{ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
@@ -163,7 +162,6 @@
 		r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
 					ST21NFCA_DM_GETINFO, pipe_info,
 					sizeof(pipe_info), &skb_pipe_info);
-
 		if (r)
 			continue;
 
@@ -185,43 +183,33 @@
 			continue;
 		}
 
-		for (j = 0; (j < ARRAY_SIZE(st21nfca_gates)) &&
+		for (j = 3; (j < ARRAY_SIZE(st21nfca_gates)) &&
 			(st21nfca_gates[j].gate != info->dst_gate_id) ; j++)
 			;
 
 		if (j < ARRAY_SIZE(st21nfca_gates) &&
 			st21nfca_gates[j].gate == info->dst_gate_id &&
 			ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) {
-			st21nfca_gates[j].pipe = pipe_info[2];
+			hdev->init_data.gates[j].pipe = pipe_info[2];
 
 			hdev->gate2pipe[st21nfca_gates[j].gate] =
-							st21nfca_gates[j].pipe;
-			hdev->pipes[st21nfca_gates[j].pipe].gate =
-							st21nfca_gates[j].gate;
-			hdev->pipes[st21nfca_gates[j].pipe].dest_host =
-							info->src_host_id;
+						pipe_info[2];
+			hdev->pipes[pipe_info[2]].gate =
+						st21nfca_gates[j].gate;
+			hdev->pipes[pipe_info[2]].dest_host =
+						info->src_host_id;
 		}
 		kfree_skb(skb_pipe_info);
 	}
 
 	/*
-	 * 3 gates have a well known pipe ID.
-	 * They will never appear in the pipe list
+	 * Three gates have a well-known pipe ID. Only NFC_HCI_LINK_MGMT_GATE
+	 * is not yet open at this stage.
 	 */
-	if (skb_pipe_list->len + 3 < ARRAY_SIZE(st21nfca_gates)) {
-		for (i = skb_pipe_list->len + 3;
-				i < ARRAY_SIZE(st21nfca_gates) - 2; i++) {
-			r = nfc_hci_connect_gate(hdev,
-					NFC_HCI_HOST_CONTROLLER_ID,
-					st21nfca_gates[i].gate,
-					st21nfca_gates[i].pipe);
-			if (r < 0)
-				goto free_list;
-		}
-	}
+	r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+				 NFC_HCI_LINK_MGMT_GATE,
+				 NFC_HCI_LINK_MGMT_PIPE);
 
-	memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
-free_list:
 	kfree_skb(skb_pipe_list);
 	return r;
 }
@@ -905,6 +893,8 @@
 			}
 		}
 	break;
+	default:
+		nfc_err(&hdev->ndev->dev, "Unexpected event on admin gate\n");
 	}
 	kfree_skb(skb);
 	return 0;
@@ -933,6 +923,8 @@
 							event, skb);
 	case ST21NFCA_APDU_READER_GATE:
 		return st21nfca_apdu_reader_event_received(hdev, event, skb);
+	case NFC_HCI_LOOPBACK_GATE:
+		return st21nfca_hci_loopback_event_received(hdev, event, skb);
 	default:
 		return 1;
 	}
@@ -993,7 +985,6 @@
 	 * persistent info to discriminate 2 identical chips
 	 */
 	dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES);
-
 	if (dev_num >= ST21NFCA_NUM_DEVICES)
 		return -ENODEV;
 
@@ -1035,6 +1026,7 @@
 	*hdev = info->hdev;
 	st21nfca_dep_init(info->hdev);
 	st21nfca_se_init(info->hdev);
+	st21nfca_vendor_cmds_init(info->hdev);
 
 	return 0;
 
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.c b/drivers/nfc/st21nfca/dep.c
similarity index 99%
rename from drivers/nfc/st21nfca/st21nfca_dep.c
rename to drivers/nfc/st21nfca/dep.c
index 8882181..798a32b 100644
--- a/drivers/nfc/st21nfca/st21nfca_dep.c
+++ b/drivers/nfc/st21nfca/dep.c
@@ -17,7 +17,6 @@
 #include <net/nfc/hci.h>
 
 #include "st21nfca.h"
-#include "st21nfca_dep.h"
 
 #define ST21NFCA_NFCIP1_INITIATOR 0x00
 #define ST21NFCA_NFCIP1_REQ 0xd4
@@ -436,6 +435,7 @@
 			return r;
 		return 0;
 	default:
+		nfc_err(&hdev->ndev->dev, "Unexpected event on card f gate\n");
 		return 1;
 	}
 	kfree_skb(skb);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index a321439..a98da33 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -94,6 +94,7 @@
 	int hard_fault;
 	struct mutex phy_lock;
 };
+
 static u8 len_seq[] = { 16, 24, 12, 29 };
 static u16 wait_tab[] = { 2, 3, 5, 15, 20, 40};
 
diff --git a/drivers/nfc/st21nfca/st21nfca_se.c b/drivers/nfc/st21nfca/se.c
similarity index 96%
rename from drivers/nfc/st21nfca/st21nfca_se.c
rename to drivers/nfc/st21nfca/se.c
index 3197e9b..c79d99b 100644
--- a/drivers/nfc/st21nfca/st21nfca_se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -17,10 +17,9 @@
 #include <net/nfc/hci.h>
 
 #include "st21nfca.h"
-#include "st21nfca_se.h"
 
 #define ST21NFCA_EVT_UICC_ACTIVATE		0x10
-#define ST21NFCA_EVT_UICC_DEACTIVATE	0x13
+#define ST21NFCA_EVT_UICC_DEACTIVATE		0x13
 #define ST21NFCA_EVT_SE_HARD_RESET		0x20
 #define ST21NFCA_EVT_SE_SOFT_RESET		0x11
 #define ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER	0x21
@@ -101,7 +100,7 @@
 				u8 state)
 {
 	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
-	int r;
+	int r, i;
 	struct sk_buff *sk_host_list;
 	u8 se_event, host_id;
 
@@ -149,7 +148,10 @@
 	if (r < 0)
 		return r;
 
-	host_id = sk_host_list->data[sk_host_list->len - 1];
+	for (i = 0; i < sk_host_list->len &&
+		sk_host_list->data[i] != se_idx; i++)
+		;
+	host_id = sk_host_list->data[i];
 	kfree_skb(sk_host_list);
 
 	if (state == ST21NFCA_SE_MODE_ON && host_id == se_idx)
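The HOST_LIST parameter fetched above is a byte array of enumerated host IDs; the old code assumed the host of interest was always the last entry, while the new loop searches for se_idx. Note that when the ID is absent the kernel loop appears to read data[i] with i equal to the list length, one byte past the valid payload, before the comparison below fails; a bounds-safe standalone model of the same scan (illustrative only):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* True if se_idx appears anywhere in the host list. */
	static bool host_listed(const uint8_t *list, size_t len, uint8_t se_idx)
	{
		size_t i;

		for (i = 0; i < len; i++)
			if (list[i] == se_idx)
				return true;
		return false;
	}

	int main(void)
	{
		const uint8_t hosts[] = { 0x01, 0x02, 0xc0 };

		return host_listed(hosts, sizeof(hosts), 0xc0) ? 0 : 1;
	}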
@@ -165,6 +167,9 @@
 	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
 	int se_count = 0;
 
+	if (test_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks))
+		return 0;
+
 	if (info->se_status->is_uicc_present) {
 		nfc_add_se(hdev->ndev, NFC_HCI_UICC_HOST_ID, NFC_SE_UICC);
 		se_count++;
@@ -189,7 +194,6 @@
 	 * Same for eSE.
 	 */
 	r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_ON);
-
 	if (r == ST21NFCA_ESE_HOST_ID) {
 		st21nfca_se_get_atr(hdev);
 		r = nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE,
@@ -340,6 +344,7 @@
 		r = nfc_se_transaction(hdev->ndev, host, transaction);
 		break;
 	default:
+		nfc_err(&hdev->ndev->dev, "Unexpected event on connectivity gate\n");
 		return 1;
 	}
 	kfree_skb(skb);
@@ -371,6 +376,9 @@
 		mod_timer(&info->se_info.bwi_timer, jiffies +
 				msecs_to_jiffies(info->se_info.wt_timeout));
 		break;
+	default:
+		nfc_err(&hdev->ndev->dev, "Unexpected event on apdu reader gate\n");
+		return 1;
 	}
 
 exit:
diff --git a/drivers/nfc/st21nfca/st21nfca.h b/drivers/nfc/st21nfca/st21nfca.h
index 15a78d3..94ffb05 100644
--- a/drivers/nfc/st21nfca/st21nfca.h
+++ b/drivers/nfc/st21nfca/st21nfca.h
@@ -18,9 +18,8 @@
 #define __LOCAL_ST21NFCA_H_
 
 #include <net/nfc/hci.h>
-
-#include "st21nfca_dep.h"
-#include "st21nfca_se.h"
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
 
 #define HCI_MODE 0
 
@@ -46,28 +45,115 @@
 #define ST21NFCA_HCI_LLC_MAX_SIZE       (ST21NFCA_HCI_LLC_LEN_CRC + 1 + \
 					ST21NFCA_HCI_LLC_MAX_PAYLOAD)
 
+/* Reader RF commands */
+#define ST21NFCA_WR_XCHG_DATA           0x10
+
+#define ST21NFCA_DEVICE_MGNT_GATE       0x01
+#define ST21NFCA_RF_READER_F_GATE       0x14
+#define ST21NFCA_RF_CARD_F_GATE		0x24
+#define ST21NFCA_APDU_READER_GATE	0xf0
+#define ST21NFCA_CONNECTIVITY_GATE	0x41
+
+/*
+ * Ref: ISO/IEC 7816-3, chapter 8.1. The initial character TS is followed
+ * by a sequence of at most 32 characters.
+ */
+#define ST21NFCA_ESE_MAX_LENGTH		33
+#define ST21NFCA_ESE_HOST_ID		0xc0
+
 #define DRIVER_DESC "HCI NFC driver for ST21NFCA"
 
-#define ST21NFCA_HCI_MODE 0
+#define ST21NFCA_HCI_MODE		0
+#define ST21NFCA_NUM_DEVICES		256
 
-#define ST21NFCA_NUM_DEVICES 256
+#define ST21NFCA_VENDOR_OUI		0x0080E1 /* STMicroelectronics */
+#define ST21NFCA_FACTORY_MODE		2
 
 struct st21nfca_se_status {
 	bool is_ese_present;
 	bool is_uicc_present;
 };
 
-int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
-		       char *llc_name, int phy_headroom, int phy_tailroom,
-		       int phy_payload, struct nfc_hci_dev **hdev,
-			   struct st21nfca_se_status *se_status);
-void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
-
 enum st21nfca_state {
 	ST21NFCA_ST_COLD,
 	ST21NFCA_ST_READY,
 };
 
+/**
+ * enum nfc_vendor_cmds - supported nfc vendor commands
+ *
+ * @FACTORY_MODE: Allows setting the driver into a mode where no secure
+ *	elements are activated. It does not consider any NFC_ATTR_VENDOR_DATA.
+ * @HCI_CLEAR_ALL_PIPES: Allows executing an HCI clear-all-pipes command.
+ *	It does not consider any NFC_ATTR_VENDOR_DATA.
+ * @HCI_DM_PUT_DATA: Allows configuring specific CLF registers, for example
+ *	RF trimmings or low-level driver configurations (I2C, SPI, SWP).
+ * @HCI_DM_UPDATE_AID: Allows configuring an AID route in the CLF routing
+ *	table according to RF technology, CLF mode or protocol.
+ * @HCI_DM_GET_INFO: Allows retrieving CLF information.
+ * @HCI_DM_GET_DATA: Allows retrieving configurable CLF data such as
+ *	low-level driver configurations or RF trimmings.
+ * @HCI_DM_LOAD: Allows loading firmware into the CLF. A complete
+ *	packet can be more than 8KB.
+ * @HCI_DM_RESET: Allows running a CLF reset in order to "commit" CLF
+ *	configuration changes without powering the CLF off.
+ * @HCI_GET_PARAM: Allows retrieving an HCI CLF parameter (for example the
+ *	white list).
+ * @HCI_DM_FIELD_GENERATOR: Allows generating different kinds of RF fields.
+ *	No anti-collision is done when using this command.
+ * @HCI_LOOPBACK: Allows echoing a command to test the DH-to-CLF
+ *	connectivity.
+ */
+enum nfc_vendor_cmds {
+	FACTORY_MODE,
+	HCI_CLEAR_ALL_PIPES,
+	HCI_DM_PUT_DATA,
+	HCI_DM_UPDATE_AID,
+	HCI_DM_GET_INFO,
+	HCI_DM_GET_DATA,
+	HCI_DM_LOAD,
+	HCI_DM_RESET,
+	HCI_GET_PARAM,
+	HCI_DM_FIELD_GENERATOR,
+	HCI_LOOPBACK,
+};
+
+struct st21nfca_vendor_info {
+	struct completion req_completion;
+	struct sk_buff *rx_skb;
+};
+
+struct st21nfca_dep_info {
+	struct sk_buff *tx_pending;
+	struct work_struct tx_work;
+	u8 curr_nfc_dep_pni;
+	u32 idx;
+	u8 to;
+	u8 did;
+	u8 bsi;
+	u8 bri;
+	u8 lri;
+} __packed;
+
+struct st21nfca_se_info {
+	u8 atr[ST21NFCA_ESE_MAX_LENGTH];
+	struct completion req_completion;
+
+	struct timer_list bwi_timer;
+	int wt_timeout; /* in msecs */
+	bool bwi_active;
+
+	struct timer_list se_active_timer;
+	bool se_active;
+	int expected_pipes;
+	int count_pipes;
+
+	bool xch_error;
+
+	se_io_cb_t cb;
+	void *cb_context;
+};
+
 struct st21nfca_hci_info {
 	struct nfc_phy_ops *phy_ops;
 	void *phy_id;
@@ -85,15 +171,41 @@
 
 	struct st21nfca_dep_info dep_info;
 	struct st21nfca_se_info se_info;
+	struct st21nfca_vendor_info vendor_info;
 };
 
-/* Reader RF commands */
-#define ST21NFCA_WR_XCHG_DATA           0x10
+int st21nfca_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops,
+		       char *llc_name, int phy_headroom, int phy_tailroom,
+		       int phy_payload, struct nfc_hci_dev **hdev,
+		       struct st21nfca_se_status *se_status);
+void st21nfca_hci_remove(struct nfc_hci_dev *hdev);
 
-#define ST21NFCA_DEVICE_MGNT_GATE       0x01
-#define ST21NFCA_RF_READER_F_GATE       0x14
-#define ST21NFCA_RF_CARD_F_GATE			0x24
-#define ST21NFCA_APDU_READER_GATE		0xf0
-#define ST21NFCA_CONNECTIVITY_GATE		0x41
+int st21nfca_dep_event_received(struct nfc_hci_dev *hdev,
+				u8 event, struct sk_buff *skb);
+int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+
+int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len);
+int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb);
+void st21nfca_dep_init(struct nfc_hci_dev *hdev);
+void st21nfca_dep_deinit(struct nfc_hci_dev *hdev);
+
+int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+					u8 event, struct sk_buff *skb);
+int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
+					u8 event, struct sk_buff *skb);
+
+int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev);
+int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx);
+int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
+		u8 *apdu, size_t apdu_length,
+		se_io_cb_t cb, void *cb_context);
+
+void st21nfca_se_init(struct nfc_hci_dev *hdev);
+void st21nfca_se_deinit(struct nfc_hci_dev *hdev);
+
+int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *ndev, u8 event,
+					 struct sk_buff *skb);
+int st21nfca_vendor_cmds_init(struct nfc_hci_dev *ndev);
 
 #endif /* __LOCAL_ST21NFCA_H_ */
diff --git a/drivers/nfc/st21nfca/st21nfca_dep.h b/drivers/nfc/st21nfca/st21nfca_dep.h
deleted file mode 100644
index baf4664..0000000
--- a/drivers/nfc/st21nfca/st21nfca_dep.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ST21NFCA_DEP_H
-#define __ST21NFCA_DEP_H
-
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-
-struct st21nfca_dep_info {
-	struct sk_buff *tx_pending;
-	struct work_struct tx_work;
-	u8 curr_nfc_dep_pni;
-	u32 idx;
-	u8 to;
-	u8 did;
-	u8 bsi;
-	u8 bri;
-	u8 lri;
-} __packed;
-
-int st21nfca_dep_event_received(struct nfc_hci_dev *hdev,
-				u8 event, struct sk_buff *skb);
-int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb);
-
-int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len);
-int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb);
-void st21nfca_dep_init(struct nfc_hci_dev *hdev);
-void st21nfca_dep_deinit(struct nfc_hci_dev *hdev);
-#endif /* __ST21NFCA_DEP_H */
diff --git a/drivers/nfc/st21nfca/st21nfca_se.h b/drivers/nfc/st21nfca/st21nfca_se.h
deleted file mode 100644
index b172cfc..0000000
--- a/drivers/nfc/st21nfca/st21nfca_se.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2014  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ST21NFCA_SE_H
-#define __ST21NFCA_SE_H
-
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-
-/*
- * ref ISO7816-3 chap 8.1. the initial character TS is followed by a
- * sequence of at most 32 characters.
- */
-#define ST21NFCA_ESE_MAX_LENGTH			33
-#define ST21NFCA_ESE_HOST_ID			0xc0
-
-struct st21nfca_se_info {
-	u8 atr[ST21NFCA_ESE_MAX_LENGTH];
-	struct completion req_completion;
-
-	struct timer_list bwi_timer;
-	int wt_timeout; /* in msecs */
-	bool bwi_active;
-
-	struct timer_list se_active_timer;
-	bool se_active;
-	int expected_pipes;
-	int count_pipes;
-
-	bool xch_error;
-
-	se_io_cb_t cb;
-	void *cb_context;
-};
-
-int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
-					u8 event, struct sk_buff *skb);
-int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev,
-					u8 event, struct sk_buff *skb);
-
-int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev);
-int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx);
-int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx);
-int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx,
-		u8 *apdu, size_t apdu_length,
-		se_io_cb_t cb, void *cb_context);
-
-void st21nfca_se_init(struct nfc_hci_dev *hdev);
-void st21nfca_se_deinit(struct nfc_hci_dev *hdev);
-#endif /* __ST21NFCA_SE_H */
diff --git a/drivers/nfc/st21nfca/vendor_cmds.c b/drivers/nfc/st21nfca/vendor_cmds.c
new file mode 100644
index 0000000..ab765e5
--- /dev/null
+++ b/drivers/nfc/st21nfca/vendor_cmds.c
@@ -0,0 +1,375 @@
+/*
+ * Proprietary commands extension for STMicroelectronics NFC Chip
+ *
+ * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/genetlink.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "st21nfca.h"
+
+#define ST21NFCA_HCI_DM_GETDATA			0x10
+#define ST21NFCA_HCI_DM_PUTDATA			0x11
+#define ST21NFCA_HCI_DM_LOAD			0x12
+#define ST21NFCA_HCI_DM_GETINFO			0x13
+#define ST21NFCA_HCI_DM_UPDATE_AID		0x20
+#define ST21NFCA_HCI_DM_RESET			0x3e
+
+#define ST21NFCA_HCI_DM_FIELD_GENERATOR		0x32
+
+#define ST21NFCA_FACTORY_MODE_ON		1
+#define ST21NFCA_FACTORY_MODE_OFF		0
+
+#define ST21NFCA_EVT_POST_DATA			0x02
+
+struct get_param_data {
+	u8 gate;
+	u8 data;
+} __packed;
+
+static int st21nfca_factory_mode(struct nfc_dev *dev, void *data,
+			       size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	if (data_len != 1)
+		return -EINVAL;
+
+	pr_debug("factory mode: %x\n", ((u8 *)data)[0]);
+
+	switch (((u8 *)data)[0]) {
+	case ST21NFCA_FACTORY_MODE_ON:
+		test_and_set_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks);
+		break;
+	case ST21NFCA_FACTORY_MODE_OFF:
+		clear_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int st21nfca_hci_clear_all_pipes(struct nfc_dev *dev, void *data,
+				      size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	return nfc_hci_disconnect_all_gates(hdev);
+}
+
+static int st21nfca_hci_dm_put_data(struct nfc_dev *dev, void *data,
+				  size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+				ST21NFCA_HCI_DM_PUTDATA, data,
+				data_len, NULL);
+}
+
+static int st21nfca_hci_dm_update_aid(struct nfc_dev *dev, void *data,
+				    size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+			ST21NFCA_HCI_DM_UPDATE_AID, data, data_len, NULL);
+}
+
+static int st21nfca_hci_dm_get_info(struct nfc_dev *dev, void *data,
+				    size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	r = nfc_hci_send_cmd(hdev,
+			     ST21NFCA_DEVICE_MGNT_GATE,
+			     ST21NFCA_HCI_DM_GETINFO,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI,
+					     HCI_DM_GET_INFO, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st21nfca_hci_dm_get_data(struct nfc_dev *dev, void *data,
+				    size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	r = nfc_hci_send_cmd(hdev,
+			     ST21NFCA_DEVICE_MGNT_GATE,
+			     ST21NFCA_HCI_DM_GETDATA,
+			     data, data_len, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI,
+					     HCI_DM_GET_DATA, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st21nfca_hci_dm_load(struct nfc_dev *dev, void *data,
+				size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+				ST21NFCA_HCI_DM_LOAD, data, data_len, NULL);
+}
+
+static int st21nfca_hci_dm_reset(struct nfc_dev *dev, void *data,
+				 size_t data_len)
+{
+	int r;
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	r = nfc_hci_send_cmd_async(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+			ST21NFCA_HCI_DM_RESET, data, data_len, NULL, NULL);
+	if (r < 0)
+		return r;
+
+	r = nfc_llc_stop(hdev->llc);
+	if (r < 0)
+		return r;
+
+	return nfc_llc_start(hdev->llc);
+}
+
+static int st21nfca_hci_get_param(struct nfc_dev *dev, void *data,
+				  size_t data_len)
+{
+	int r;
+	struct sk_buff *msg, *skb;
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+	struct get_param_data *param = (struct get_param_data *)data;
+
+	if (data_len < sizeof(struct get_param_data))
+		return -EPROTO;
+
+	r = nfc_hci_get_param(hdev, param->gate, param->data, &skb);
+	if (r)
+		goto exit;
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI,
+					     HCI_GET_PARAM, skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+
+free_skb:
+	kfree_skb(skb);
+exit:
+	return r;
+}
+
+static int st21nfca_hci_dm_field_generator(struct nfc_dev *dev, void *data,
+					   size_t data_len)
+{
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+
+	return nfc_hci_send_cmd(hdev,
+				ST21NFCA_DEVICE_MGNT_GATE,
+				ST21NFCA_HCI_DM_FIELD_GENERATOR,
+				data, data_len, NULL);
+}
+
+int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *hdev, u8 event,
+					 struct sk_buff *skb)
+{
+	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	switch (event) {
+	case ST21NFCA_EVT_POST_DATA:
+		info->vendor_info.rx_skb = skb;
+		break;
+	default:
+		nfc_err(&hdev->ndev->dev, "Unexpected event on loopback gate\n");
+	}
+	complete(&info->vendor_info.req_completion);
+	return 0;
+}
+EXPORT_SYMBOL(st21nfca_hci_loopback_event_received);
+
+static int st21nfca_hci_loopback(struct nfc_dev *dev, void *data,
+				 size_t data_len)
+{
+	int r;
+	struct sk_buff *msg;
+	struct nfc_hci_dev *hdev = nfc_get_drvdata(dev);
+	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	if (data_len <= 0)
+		return -EPROTO;
+
+	reinit_completion(&info->vendor_info.req_completion);
+	info->vendor_info.rx_skb = NULL;
+
+	r = nfc_hci_send_event(hdev, NFC_HCI_LOOPBACK_GATE,
+			       ST21NFCA_EVT_POST_DATA, data, data_len);
+	if (r < 0) {
+		r = -EPROTO;
+		goto exit;
+	}
+
+	wait_for_completion_interruptible(&info->vendor_info.req_completion);
+	if (!info->vendor_info.rx_skb ||
+	    info->vendor_info.rx_skb->len != data_len) {
+		r = -EPROTO;
+		goto exit;
+	}
+
+	msg = nfc_vendor_cmd_alloc_reply_skb(hdev->ndev,
+					ST21NFCA_VENDOR_OUI,
+					HCI_LOOPBACK,
+					info->vendor_info.rx_skb->len);
+	if (!msg) {
+		r = -ENOMEM;
+		goto free_skb;
+	}
+
+	if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len,
+		    info->vendor_info.rx_skb->data)) {
+		kfree_skb(msg);
+		r = -ENOBUFS;
+		goto free_skb;
+	}
+
+	r = nfc_vendor_cmd_reply(msg);
+free_skb:
+	kfree_skb(info->vendor_info.rx_skb);
+exit:
+	return r;
+}
+
+static struct nfc_vendor_cmd st21nfca_vendor_cmds[] = {
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = FACTORY_MODE,
+		.doit = st21nfca_factory_mode,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_CLEAR_ALL_PIPES,
+		.doit = st21nfca_hci_clear_all_pipes,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_PUT_DATA,
+		.doit = st21nfca_hci_dm_put_data,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_UPDATE_AID,
+		.doit = st21nfca_hci_dm_update_aid,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_GET_INFO,
+		.doit = st21nfca_hci_dm_get_info,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_GET_DATA,
+		.doit = st21nfca_hci_dm_get_data,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_LOAD,
+		.doit = st21nfca_hci_dm_load,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_RESET,
+		.doit = st21nfca_hci_dm_reset,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_GET_PARAM,
+		.doit = st21nfca_hci_get_param,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_DM_FIELD_GENERATOR,
+		.doit = st21nfca_hci_dm_field_generator,
+	},
+	{
+		.vendor_id = ST21NFCA_VENDOR_OUI,
+		.subcmd = HCI_LOOPBACK,
+		.doit = st21nfca_hci_loopback,
+	},
+};
+
+int st21nfca_vendor_cmds_init(struct nfc_hci_dev *hdev)
+{
+	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+	init_completion(&info->vendor_info.req_completion);
+	return nfc_set_vendor_cmds(hdev->ndev, st21nfca_vendor_cmds,
+				   sizeof(st21nfca_vendor_cmds));
+}
+EXPORT_SYMBOL(st21nfca_vendor_cmds_init);
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 70b0707..123aa98 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2211,6 +2211,12 @@
 			trf7970a_pm_runtime_resume, NULL)
 };
 
+static const struct of_device_id trf7970a_of_match[] = {
+	{ .compatible = "ti,trf7970a", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, trf7970a_of_match);
+
 static const struct spi_device_id trf7970a_id_table[] = {
 	{ "trf7970a", 0 },
 	{ }
@@ -2223,6 +2229,7 @@
 	.id_table	= trf7970a_id_table,
 	.driver		= {
 		.name	= "trf7970a",
+		.of_match_table = of_match_ptr(trf7970a_of_match),
 		.owner	= THIS_MODULE,
 		.pm	= &trf7970a_pm_ops,
 	},
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d3c6676..6fd4e5a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -67,7 +67,7 @@
 	int rc;
 
 	/* Stop the user from reading */
-	if (pos > nvmem->size)
+	if (pos >= nvmem->size)
 		return 0;
 
 	if (pos + count > nvmem->size)
@@ -92,7 +92,7 @@
 	int rc;
 
 	/* Stop the user from writing */
-	if (pos > nvmem->size)
+	if (pos >= nvmem->size)
 		return 0;
 
 	if (pos + count > nvmem->size)
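Both hunks above matter exactly at the boundary pos == nvmem->size: the old > check let such a request fall through, clamp count to zero and issue a zero-length regmap access, whereas >= returns 0 immediately. A minimal userspace model of the clamping logic (illustrative only):

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified model of the nvmem read/write bounds clamp. */
	static size_t clamp_count(size_t pos, size_t count, size_t size)
	{
		if (pos >= size)		/* at or past the end: nothing to do */
			return 0;
		if (pos + count > size)		/* trim an access crossing the end */
			count = size - pos;
		return count;
	}

	int main(void)
	{
		/* size = 16: an access at pos 16 must yield 0 bytes. */
		printf("%zu\n", clamp_count(16, 4, 16));	/* 0 */
		printf("%zu\n", clamp_count(14, 4, 16));	/* 2 */
		return 0;
	}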
@@ -825,7 +825,7 @@
 		return rc;
 
 	/* shift bits in-place */
-	if (cell->bit_offset || cell->bit_offset)
+	if (cell->bit_offset || cell->nbits)
 		nvmem_shift_read_buffer_in_place(cell, buf);
 
 	*len = cell->bytes;
@@ -938,7 +938,7 @@
 	rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
 
 	/* free the tmp buffer */
-	if (cell->bit_offset)
+	if (cell->bit_offset || cell->nbits)
 		kfree(buf);
 
 	if (IS_ERR_VALUE(rc))
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 14777dd..cfa3b85 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -103,7 +103,7 @@
 	struct nvmem_device *nvmem;
 	struct regmap *regmap;
 	struct sunxi_sid *sid;
-	int i, size;
+	int ret, i, size;
 	char *randomness;
 
 	sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
@@ -131,6 +131,11 @@
 		return PTR_ERR(nvmem);
 
 	randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+	if (!randomness) {
+		ret = -ENOMEM;
+		goto err_unreg_nvmem;
+	}
+
 	for (i = 0; i < size; i++)
 		randomness[i] = sunxi_sid_read_byte(sid, i);
 
@@ -140,6 +145,10 @@
 	platform_set_drvdata(pdev, nvmem);
 
 	return 0;
+
+err_unreg_nvmem:
+	nvmem_unregister(nvmem);
+	return ret;
 }
 
 static int sunxi_sid_remove(struct platform_device *pdev)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d449714..4a7da3c 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1243,6 +1243,10 @@
 	BUG_ON(!chip);
 	if (!chip->irq_write_msi_msg)
 		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+	if (!chip->irq_mask)
+		chip->irq_mask = pci_msi_mask_irq;
+	if (!chip->irq_unmask)
+		chip->irq_unmask = pci_msi_unmask_irq;
 }
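The two added checks follow the same fill-in-defaults pattern as the existing irq_write_msi_msg one: only callbacks the irq_chip driver left NULL are populated, so chip-specific implementations are never overridden. A toy standalone model of the pattern (hypothetical names, illustrative only):

	#include <stdio.h>

	struct toy_chip {
		void (*irq_mask)(void);
		void (*irq_unmask)(void);
	};

	static void default_mask(void)   { puts("default mask"); }
	static void default_unmask(void) { puts("default unmask"); }

	/* Populate only the callbacks the driver did not provide. */
	static void update_chip_ops(struct toy_chip *chip)
	{
		if (!chip->irq_mask)
			chip->irq_mask = default_mask;
		if (!chip->irq_unmask)
			chip->irq_unmask = default_unmask;
	}

	int main(void)
	{
		struct toy_chip chip = { 0 };

		update_chip_ops(&chip);
		chip.irq_mask();
		chip.irq_unmask();
		return 0;
	}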
 
 /**
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2..108a311 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@
 	 * Unbound PCI devices are always put in D0, regardless of
 	 * runtime PM status.  During probe, the device is set to
 	 * active and the usage count is incremented.  If the driver
-	 * supports runtime PM, it should call pm_runtime_put_noidle()
-	 * in its probe routine and pm_runtime_get_noresume() in its
-	 * remove routine.
+	 * supports runtime PM, it should call pm_runtime_put_noidle(),
+	 * or any other runtime PM helper function decrementing the usage
+	 * count, in its probe routine and pm_runtime_get_noresume() in
+	 * its remove routine.
 	 */
 	pm_runtime_get_sync(dev);
 	pci_dev->driver = pci_drv;
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 0062027..77a2e05 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -276,6 +276,7 @@
 	{ .compatible = "marvell,berlin2q-sata-phy" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
 
 static struct platform_driver phy_berlin_sata_driver = {
 	.probe	= phy_berlin_sata_probe,
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 49a1ed0..107cb57 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -432,6 +432,7 @@
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
 
 static
 int ufs_qcom_phy_disable_vreg(struct phy *phy,
@@ -474,6 +475,7 @@
 		phy->is_ref_clk_enabled = false;
 	}
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
 #define UFS_REF_CLK_EN	(1 << 5)
 
@@ -517,11 +519,13 @@
 {
 	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
 
 void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
 {
 	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
 
 /* Turn ON M-PHY RMMI interface clocks */
 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
@@ -550,6 +554,7 @@
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
 
 /* Turn OFF M-PHY RMMI interface clocks */
 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
@@ -562,6 +567,7 @@
 		phy->is_iface_clk_enabled = false;
 	}
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
 
 int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
 {
@@ -578,6 +584,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
 
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
 {
@@ -595,6 +602,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
 
 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
 					  u8 major, u16 minor, u16 step)
@@ -605,6 +613,7 @@
 	ufs_qcom_phy->host_ctrl_rev_minor = minor;
 	ufs_qcom_phy->host_ctrl_rev_step = step;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
 
 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
 {
@@ -625,6 +634,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
 int ufs_qcom_phy_remove(struct phy *generic_phy,
 			struct ufs_qcom_phy *ufs_qcom_phy)
@@ -662,6 +672,7 @@
 	return ufs_qcom_phy->phy_spec_ops->
 			is_physical_coding_sublayer_ready(ufs_qcom_phy);
 }
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
 
 int ufs_qcom_phy_power_on(struct phy *generic_phy)
 {
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 5a5c073..91d6f34 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -98,6 +98,7 @@
 	struct device_node *child;
 	struct regmap *grf;
 	unsigned int reg_offset;
+	int err;
 
 	grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
 	if (IS_ERR(grf)) {
@@ -129,6 +130,11 @@
 			return PTR_ERR(rk_phy->phy);
 		}
 		phy_set_drvdata(rk_phy->phy, rk_phy);
+
+		/* Power up the PHY only while in use; power it down at init. */
+		err = rockchip_usb_phy_power(rk_phy, 1);
+		if (err)
+			return err;
 	}
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index faf6356..293ed43 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -26,7 +26,8 @@
 #include "pinctrl-imx.h"
 
 enum imx25_pads {
-	MX25_PAD_RESERVE0 = 1,
+	MX25_PAD_RESERVE0 = 0,
+	MX25_PAD_RESERVE1 = 1,
 	MX25_PAD_A10 = 2,
 	MX25_PAD_A13 = 3,
 	MX25_PAD_A14 = 4,
@@ -169,6 +170,7 @@
 /* Pad names for the pinmux subsystem */
 static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
 	IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
+	IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
 	IMX_PINCTRL_PIN(MX25_PAD_A10),
 	IMX_PINCTRL_PIN(MX25_PAD_A13),
 	IMX_PINCTRL_PIN(MX25_PAD_A14),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
index 6367661..f9a3f8f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
@@ -653,7 +653,7 @@
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "spi1"),		/* CS1 */
-		  SUNXI_FUNCTION(0x3, "uart3"),		/* PWM1 */
+		  SUNXI_FUNCTION(0x3, "pwm"),		/* PWM1 */
 		  SUNXI_FUNCTION(0x5, "uart2"),		/* CTS */
 		  SUNXI_FUNCTION_IRQ(0x6, 13)),		/* EINT13 */
 };
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
index 7e9dae5..2df8bbe 100644
--- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -22,49 +22,49 @@
 #define DRIVER_NAME "ph1-sld8-pinctrl"
 
 static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
-	UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(0, "PCA00", 0,
 			     15, UNIPHIER_PIN_DRV_4_8,
 			     15, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(1, "PCA01", 0,
 			     16, UNIPHIER_PIN_DRV_4_8,
 			     16, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(2, "PCA02", 0,
 			     17, UNIPHIER_PIN_DRV_4_8,
 			     17, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(3, "PCA03", 0,
 			     18, UNIPHIER_PIN_DRV_4_8,
 			     18, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(4, "PCA04", 0,
 			     19, UNIPHIER_PIN_DRV_4_8,
 			     19, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(5, "PCA05", 0,
 			     20, UNIPHIER_PIN_DRV_4_8,
 			     20, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(6, "PCA06", 0,
 			     21, UNIPHIER_PIN_DRV_4_8,
 			     21, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(7, "PCA07", 0,
 			     22, UNIPHIER_PIN_DRV_4_8,
 			     22, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(8, "PCA08", 0,
 			     23, UNIPHIER_PIN_DRV_4_8,
 			     23, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(9, "PCA09", 0,
 			     24, UNIPHIER_PIN_DRV_4_8,
 			     24, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(10, "PCA10", 0,
 			     25, UNIPHIER_PIN_DRV_4_8,
 			     25, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(11, "PCA11", 0,
 			     26, UNIPHIER_PIN_DRV_4_8,
 			     26, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(12, "PCA12", 0,
 			     27, UNIPHIER_PIN_DRV_4_8,
 			     27, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(13, "PCA13", 0,
 			     28, UNIPHIER_PIN_DRV_4_8,
 			     28, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(14, "PCA14", 0,
 			     29, UNIPHIER_PIN_DRV_4_8,
 			     29, UNIPHIER_PIN_PULL_DOWN),
 	UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
@@ -118,199 +118,199 @@
 	UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
 			     36, UNIPHIER_PIN_DRV_8_12_16_20,
 			     128, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8,
 			     40, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8,
 			     44, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8,
 			     48, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8,
 			     52, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8,
 			     56, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8,
 			     60, UNIPHIER_PIN_DRV_8_12_16_20,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(38, "SDCD", 8,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     129, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(39, "SDWP", 8,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     130, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     131, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0,
 			     37, UNIPHIER_PIN_DRV_4_8,
 			     37, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0,
 			     38, UNIPHIER_PIN_DRV_4_8,
 			     38, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0,
 			     39, UNIPHIER_PIN_DRV_4_8,
 			     39, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0,
 			     40, UNIPHIER_PIN_DRV_4_8,
 			     40, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0,
 			     41, UNIPHIER_PIN_DRV_4_8,
 			     41, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(46, "PCREG", 0,
 			     42, UNIPHIER_PIN_DRV_4_8,
 			     42, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0,
 			     43, UNIPHIER_PIN_DRV_4_8,
 			     43, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0,
 			     44, UNIPHIER_PIN_DRV_4_8,
 			     44, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0,
 			     45, UNIPHIER_PIN_DRV_4_8,
 			     45, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0,
 			     46, UNIPHIER_PIN_DRV_4_8,
 			     46, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0,
 			     47, UNIPHIER_PIN_DRV_4_8,
 			     47, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0,
 			     48, UNIPHIER_PIN_DRV_4_8,
 			     48, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0,
 			     49, UNIPHIER_PIN_DRV_4_8,
 			     49, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(54, "PCWE", 0,
 			     50, UNIPHIER_PIN_DRV_4_8,
 			     50, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(55, "PCOE", 0,
 			     51, UNIPHIER_PIN_DRV_4_8,
 			     51, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0,
 			     52, UNIPHIER_PIN_DRV_4_8,
 			     52, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0,
 			     53, UNIPHIER_PIN_DRV_4_8,
 			     53, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0,
 			     54, UNIPHIER_PIN_DRV_4_8,
 			     54, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0,
 			     55, UNIPHIER_PIN_DRV_4_8,
 			     55, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0,
 			     56, UNIPHIER_PIN_DRV_4_8,
 			     56, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0,
 			     57, UNIPHIER_PIN_DRV_4_8,
 			     57, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0,
 			     58, UNIPHIER_PIN_DRV_4_8,
 			     58, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0,
 			     59, UNIPHIER_PIN_DRV_4_8,
 			     59, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0,
 			     60, UNIPHIER_PIN_DRV_4_8,
 			     60, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0,
 			     61, UNIPHIER_PIN_DRV_4_8,
 			     61, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0,
 			     62, UNIPHIER_PIN_DRV_4_8,
 			     62, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0,
 			     63, UNIPHIER_PIN_DRV_4_8,
 			     63, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0,
 			     64, UNIPHIER_PIN_DRV_4_8,
 			     64, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0,
 			     65, UNIPHIER_PIN_DRV_4_8,
 			     65, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0,
 			     66, UNIPHIER_PIN_DRV_4_8,
 			     66, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0,
 			     67, UNIPHIER_PIN_DRV_4_8,
 			     67, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0,
 			     68, UNIPHIER_PIN_DRV_4_8,
 			     68, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0,
 			     69, UNIPHIER_PIN_DRV_4_8,
 			     69, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0,
 			     70, UNIPHIER_PIN_DRV_4_8,
 			     70, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0,
 			     71, UNIPHIER_PIN_DRV_4_8,
 			     71, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0,
 			     72, UNIPHIER_PIN_DRV_4_8,
 			     72, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0,
 			     73, UNIPHIER_PIN_DRV_4_8,
 			     73, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0,
 			     74, UNIPHIER_PIN_DRV_4_8,
 			     74, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0,
 			     75, UNIPHIER_PIN_DRV_4_8,
 			     75, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0,
 			     76, UNIPHIER_PIN_DRV_4_8,
 			     76, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0,
 			     77, UNIPHIER_PIN_DRV_4_8,
 			     77, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0,
 			     78, UNIPHIER_PIN_DRV_4_8,
 			     78, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0,
 			     79, UNIPHIER_PIN_DRV_4_8,
 			     79, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0,
 			     80, UNIPHIER_PIN_DRV_4_8,
 			     80, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0,
 			     81, UNIPHIER_PIN_DRV_4_8,
 			     81, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0,
 			     82, UNIPHIER_PIN_DRV_4_8,
 			     82, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0,
 			     83, UNIPHIER_PIN_DRV_4_8,
 			     83, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0,
 			     84, UNIPHIER_PIN_DRV_4_8,
 			     84, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0,
 			     85, UNIPHIER_PIN_DRV_4_8,
 			     85, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0,
 			     86, UNIPHIER_PIN_DRV_4_8,
 			     86, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0,
 			     87, UNIPHIER_PIN_DRV_4_8,
 			     87, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(92, "AGCI", 3,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     132, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(93, "AGCR", 4,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     133, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     134, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0,
 			     88, UNIPHIER_PIN_DRV_4_8,
 			     88, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0,
 			     89, UNIPHIER_PIN_DRV_4_8,
 			     89, UNIPHIER_PIN_PULL_DOWN),
 	UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
@@ -325,31 +325,31 @@
 	UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
 			     93, UNIPHIER_PIN_DRV_4_8,
 			     93, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
 			     94, UNIPHIER_PIN_DRV_4_8,
 			     94, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", 10,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", 10,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", 11,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", 11,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13,
 			     -1, UNIPHIER_PIN_DRV_FIXED_4,
 			     -1, UNIPHIER_PIN_PULL_NONE),
 	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
@@ -358,76 +358,76 @@
 	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
 			     96, UNIPHIER_PIN_DRV_4_8,
 			     96, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(112, "SBO1", 0,
 			     97, UNIPHIER_PIN_DRV_4_8,
 			     97, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(113, "SBI1", 0,
 			     98, UNIPHIER_PIN_DRV_4_8,
 			     98, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(114, "TXD1", 0,
 			     99, UNIPHIER_PIN_DRV_4_8,
 			     99, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(115, "RXD1", 0,
 			     100, UNIPHIER_PIN_DRV_4_8,
 			     100, UNIPHIER_PIN_PULL_UP),
-	UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(116, "HIN", 1,
 			     -1, UNIPHIER_PIN_DRV_FIXED_5,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(117, "VIN", 2,
 			     -1, UNIPHIER_PIN_DRV_FIXED_5,
 			     -1, UNIPHIER_PIN_PULL_NONE),
-	UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(118, "TCON0", 0,
 			     101, UNIPHIER_PIN_DRV_4_8,
 			     101, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(119, "TCON1", 0,
 			     102, UNIPHIER_PIN_DRV_4_8,
 			     102, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(120, "TCON2", 0,
 			     103, UNIPHIER_PIN_DRV_4_8,
 			     103, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(121, "TCON3", 0,
 			     104, UNIPHIER_PIN_DRV_4_8,
 			     104, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(122, "TCON4", 0,
 			     105, UNIPHIER_PIN_DRV_4_8,
 			     105, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(123, "TCON5", 0,
 			     106, UNIPHIER_PIN_DRV_4_8,
 			     106, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(124, "TCON6", 0,
 			     107, UNIPHIER_PIN_DRV_4_8,
 			     107, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(125, "TCON7", 0,
 			     108, UNIPHIER_PIN_DRV_4_8,
 			     108, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(126, "TCON8", 0,
 			     109, UNIPHIER_PIN_DRV_4_8,
 			     109, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(127, "PWMA", 0,
 			     110, UNIPHIER_PIN_DRV_4_8,
 			     110, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0,
 			     111, UNIPHIER_PIN_DRV_4_8,
 			     111, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0,
 			     112, UNIPHIER_PIN_DRV_4_8,
 			     112, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0,
 			     113, UNIPHIER_PIN_DRV_4_8,
 			     113, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0,
 			     114, UNIPHIER_PIN_DRV_4_8,
 			     114, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0,
 			     115, UNIPHIER_PIN_DRV_4_8,
 			     115, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0,
 			     116, UNIPHIER_PIN_DRV_4_8,
 			     116, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0,
 			     117, UNIPHIER_PIN_DRV_4_8,
 			     117, UNIPHIER_PIN_PULL_DOWN),
-	UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+	UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0,
 			     118, UNIPHIER_PIN_DRV_4_8,
 			     118, UNIPHIER_PIN_PULL_DOWN),
 };
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 01bf347..a9567af 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -192,9 +192,9 @@
 	AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
 		 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
 	AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
-		 AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+		 AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
 	AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
-		 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+		 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
 	/* secondary switchable output of DCDC1 */
 	AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
 		    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7849187..8a34f6a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1403,6 +1403,10 @@
 			return 0;
 		}
 
+		/* Did the lookup explicitly defer for us? */
+		if (ret == -EPROBE_DEFER)
+			return ret;
+
 		if (have_full_constraints()) {
 			r = dummy_regulator_rdev;
 		} else {
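
The two added lines above let a deferred regulator lookup propagate instead of being masked by the full-constraints fallback. A hedged, minimal sketch of the consumer-side idiom this supports follows; the demo_* names and the "vdd" supply name are hypothetical:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int demo_probe(struct platform_device *pdev)
{
	struct regulator *vdd;

	/* may return ERR_PTR(-EPROBE_DEFER); pass it up unmodified so
	 * the driver core can retry the probe once the supply appears */
	vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	return regulator_enable(vdd);
}
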
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 848e3b6..4bb5262f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -319,6 +319,8 @@
 	int retries = 0, cc;
 	unsigned long laob = 0;
 
+	WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
+			     !q->u.out.use_cq));
 	if (q->u.out.use_cq && aob != 0) {
 		fc = QDIO_SIGA_WRITEQ;
 		laob = aob;
@@ -329,8 +331,6 @@
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
-		(aob && fc != QDIO_SIGA_WRITEQ));
 	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
 
 	/* hipersocket busy condition */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6719447d..1766a20 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -18,6 +18,7 @@
 #include <linux/bitops.h>
 #include <linux/seq_file.h>
 #include <linux/ethtool.h>
+#include <linux/hashtable.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -739,11 +740,17 @@
 	unsigned short vid;
 };
 
-struct qeth_mc_mac {
-	struct list_head list;
-	__u8 mc_addr[MAX_ADDR_LEN];
-	unsigned char mc_addrlen;
-	int is_vmac;
+enum qeth_mac_disposition {
+	QETH_DISP_MAC_DELETE = 0,
+	QETH_DISP_MAC_DO_NOTHING = 1,
+	QETH_DISP_MAC_ADD = 2,
+};
+
+struct qeth_mac {
+	u8 mac_addr[OSA_ADDR_LEN];
+	u8 is_uc:1;
+	u8 disp_flag:2;
+	struct hlist_node hnode;
 };
 
 struct qeth_rx {
@@ -790,7 +797,7 @@
 	spinlock_t mclock;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	struct list_head vid_list;
-	struct list_head mc_list;
+	DECLARE_HASHTABLE(mac_htable, 4);
 	struct work_struct kernel_thread_starter;
 	spinlock_t thread_mask_lock;
 	unsigned long thread_start_mask;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index dc905b3..8f1b091 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -19,7 +19,9 @@
 #include <linux/mii.h>
 #include <linux/ip.h>
 #include <linux/list.h>
-
+#include <linux/hash.h>
+#include <linux/hashtable.h>
+#include <linux/string.h>
 #include "qeth_core.h"
 #include "qeth_l2.h"
 
@@ -28,7 +30,7 @@
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
 			   enum qeth_ipa_cmds);
-static void qeth_l2_set_multicast_list(struct net_device *);
+static void qeth_l2_set_rx_mode(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
 static void qeth_bridge_state_change(struct qeth_card *card,
@@ -193,49 +195,44 @@
 	return rc;
 }
 
-static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
 {
-	struct qeth_mc_mac *mc;
-	int rc;
-
-	mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC);
-
-	if (!mc)
-		return;
-
-	memcpy(mc->mc_addr, mac, OSA_ADDR_LEN);
-	mc->mc_addrlen = OSA_ADDR_LEN;
-	mc->is_vmac = vmac;
-
-	if (vmac) {
-		rc = qeth_setdel_makerc(card,
-			qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
-	} else {
-		rc = qeth_setdel_makerc(card,
-			qeth_l2_send_setgroupmac(card, mac));
-	}
-
-	if (!rc)
-		list_add_tail(&mc->list, &card->mc_list);
-	else
-		kfree(mc);
+	return get_unaligned((u32 *)(&addr[2]));
 }
 
-static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
 {
-	struct qeth_mc_mac *mc, *tmp;
+
+	int rc;
+
+	if (mac->is_uc) {
+		rc = qeth_setdel_makerc(card,
+				qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_SETVMAC));
+	} else {
+		rc = qeth_setdel_makerc(card,
+				qeth_l2_send_setgroupmac(card, mac->mac_addr));
+	}
+	return rc;
+}
+
+static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+{
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
 
 	spin_lock_bh(&card->mclock);
-	list_for_each_entry_safe(mc, tmp, &card->mc_list, list) {
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
 		if (del) {
-			if (mc->is_vmac)
-				qeth_l2_send_setdelmac(card, mc->mc_addr,
-					IPA_CMD_DELVMAC);
+			if (mac->is_uc)
+				qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_DELVMAC);
 			else
-				qeth_l2_send_delgroupmac(card, mc->mc_addr);
+				qeth_l2_send_delgroupmac(card, mac->mac_addr);
 		}
-		list_del(&mc->list);
-		kfree(mc);
+		hash_del(&mac->hnode);
+		kfree(mac);
 	}
 	spin_unlock_bh(&card->mclock);
 }
@@ -403,7 +400,7 @@
 		rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
 		kfree(tmpid);
 	}
-	qeth_l2_set_multicast_list(card->dev);
+	qeth_l2_set_rx_mode(card->dev);
 	return rc;
 }
 
@@ -460,7 +457,7 @@
 		card->state = CARD_STATE_SOFTSETUP;
 	}
 	if (card->state == CARD_STATE_SOFTSETUP) {
-		qeth_l2_del_all_mc(card, 0);
+		qeth_l2_del_all_macs(card, 0);
 		qeth_clear_ipacmd_list(card);
 		card->state = CARD_STATE_HARDSETUP;
 	}
@@ -511,7 +508,7 @@
 			if (skb->protocol == htons(ETH_P_802_2))
 				*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
 			len = skb->len;
-			netif_receive_skb(skb);
+			napi_gro_receive(&card->napi, skb);
 			break;
 		case QETH_HEADER_TYPE_OSN:
 			if (card->info.type == QETH_CARD_TYPE_OSN) {
@@ -768,29 +765,91 @@
 		card->options.sbp.role = role;
 		card->info.promisc_mode = promisc_mode;
 	}
+
+}
+/*
+ * A new MAC address is added to the hash table and marked to be
+ * written to the card only if it is not already in the hash table.
+ */
+static void
+qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc)
+{
+	struct qeth_mac *mac;
+
+	hash_for_each_possible(card->mac_htable, mac, hnode,
+			qeth_l2_mac_hash(ha->addr)) {
+		if (is_uc == mac->is_uc &&
+		    !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) {
+			mac->disp_flag = QETH_DISP_MAC_DO_NOTHING;
+			return;
+		}
+	}
+
+	mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC);
+
+	if (!mac)
+		return;
+
+	memcpy(mac->mac_addr, ha->addr, OSA_ADDR_LEN);
+	mac->is_uc = is_uc;
+	mac->disp_flag = QETH_DISP_MAC_ADD;
+
+	hash_add(card->mac_htable, &mac->hnode,
+			qeth_l2_mac_hash(mac->mac_addr));
+
 }
 
-static void qeth_l2_set_multicast_list(struct net_device *dev)
+static void qeth_l2_set_rx_mode(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct netdev_hw_addr *ha;
+	struct qeth_mac *mac;
+	struct hlist_node *tmp;
+	int i;
+	int rc;
 
 	if (card->info.type == QETH_CARD_TYPE_OSN)
-		return ;
+		return;
 
 	QETH_CARD_TEXT(card, 3, "setmulti");
 	if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
 	    (card->state != CARD_STATE_UP))
 		return;
-	qeth_l2_del_all_mc(card, 1);
+
 	spin_lock_bh(&card->mclock);
+
 	netdev_for_each_mc_addr(ha, dev)
-		qeth_l2_add_mc(card, ha->addr, 0);
+		qeth_l2_add_mac(card, ha, 0);
 
 	netdev_for_each_uc_addr(ha, dev)
-		qeth_l2_add_mc(card, ha->addr, 1);
+		qeth_l2_add_mac(card, ha, 1);
+
+	hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+		if (mac->disp_flag == QETH_DISP_MAC_DELETE) {
+			if (!mac->is_uc)
+				rc = qeth_l2_send_delgroupmac(card,
+						mac->mac_addr);
+			else {
+				rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
+						IPA_CMD_DELVMAC);
+			}
+
+			hash_del(&mac->hnode);
+			kfree(mac);
+
+		} else if (mac->disp_flag == QETH_DISP_MAC_ADD) {
+			rc = qeth_l2_write_mac(card, mac);
+			if (rc) {
+				hash_del(&mac->hnode);
+				kfree(mac);
+			} else
+				mac->disp_flag = QETH_DISP_MAC_DELETE;
+		} else
+			mac->disp_flag = QETH_DISP_MAC_DELETE;
+	}
 
 	spin_unlock_bh(&card->mclock);
+
 	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
 		qeth_setadp_promisc_mode(card);
 	else
@@ -974,7 +1033,7 @@
 
 	qeth_l2_create_device_attributes(&gdev->dev);
 	INIT_LIST_HEAD(&card->vid_list);
-	INIT_LIST_HEAD(&card->mc_list);
+	hash_init(card->mac_htable);
 	card->options.layer2 = 1;
 	card->info.hwtrap = 0;
 	return 0;
@@ -1020,7 +1079,7 @@
 	.ndo_get_stats		= qeth_get_stats,
 	.ndo_start_xmit		= qeth_l2_hard_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_rx_mode	= qeth_l2_set_multicast_list,
+	.ndo_set_rx_mode	= qeth_l2_set_rx_mode,
 	.ndo_do_ioctl	   	= qeth_l2_do_ioctl,
 	.ndo_set_mac_address    = qeth_l2_set_mac_address,
 	.ndo_change_mtu	   	= qeth_change_mtu,
@@ -1179,7 +1238,7 @@
 			rtnl_unlock();
 		}
 		/* this also sets saved unicast addresses */
-		qeth_l2_set_multicast_list(card->dev);
+		qeth_l2_set_rx_mode(card->dev);
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
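
The qeth_l2 rework above swaps the flat mc_list for a fixed-size kernel hashtable keyed on the last four octets of the MAC address, with a disposition flag used to diff the old and new address sets. A minimal, self-contained sketch of that hashtable idiom follows; the demo_* names are hypothetical, and the 4-bit table size mirrors the DECLARE_HASHTABLE(mac_htable, 4) in the patch:

#include <linux/hashtable.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/unaligned.h>

#define DEMO_ADDR_LEN 6

struct demo_mac {
	u8 addr[DEMO_ADDR_LEN];
	struct hlist_node hnode;
};

static DEFINE_HASHTABLE(demo_htable, 4);	/* 2^4 = 16 buckets */

static u32 demo_mac_hash(const u8 *addr)
{
	/* the last four octets vary the most, so key on them */
	return get_unaligned((const u32 *)(&addr[2]));
}

static void demo_add(const u8 *addr)
{
	struct demo_mac *mac;

	/* walk only the bucket this key hashes to */
	hash_for_each_possible(demo_htable, mac, hnode, demo_mac_hash(addr))
		if (!memcmp(addr, mac->addr, DEMO_ADDR_LEN))
			return;		/* already present */

	mac = kzalloc(sizeof(*mac), GFP_ATOMIC);
	if (!mac)
		return;
	memcpy(mac->addr, addr, DEMO_ADDR_LEN);
	hash_add(demo_htable, &mac->hnode, demo_mac_hash(mac->addr));
}

static void demo_flush(void)
{
	struct demo_mac *mac;
	struct hlist_node *tmp;
	int bkt;

	/* the _safe variant permits deletion while iterating */
	hash_for_each_safe(demo_htable, bkt, tmp, mac, hnode) {
		hash_del(&mac->hnode);
		kfree(mac);
	}
}
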
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index add419d..a56a7b2 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -212,6 +212,17 @@
 	.llseek		= noop_llseek,
 };
 
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single-entry buffers.  Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+	return scsi_sg_count(cmd) != 1 ||
+		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
 /* This function will complete an aen request from the isr */
 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 {
@@ -1339,7 +1350,8 @@
 				}
 
 				/* Now complete the io */
-				scsi_dma_unmap(cmd);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
 				cmd->scsi_done(cmd);
 				tw_dev->state[request_id] = TW_S_COMPLETED;
 				twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@
 				struct scsi_cmnd *cmd = tw_dev->srb[i];
 
 				cmd->result = (DID_RESET << 16);
-				scsi_dma_unmap(cmd);
+				if (twa_command_mapped(cmd))
+					scsi_dma_unmap(cmd);
 				cmd->scsi_done(cmd);
 			}
 		}
@@ -1765,12 +1778,14 @@
 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
 	switch (retval) {
 	case SCSI_MLQUEUE_HOST_BUSY:
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		twa_free_request_id(tw_dev, request_id);
 		break;
 	case 1:
 		SCpnt->result = (DID_ERROR << 16);
-		scsi_dma_unmap(SCpnt);
+		if (twa_command_mapped(SCpnt))
+			scsi_dma_unmap(SCpnt);
 		done(SCpnt);
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@
 		/* Map sglist from scsi layer to cmd packet */
 
 		if (scsi_sg_count(srb)) {
-			if ((scsi_sg_count(srb) == 1) &&
-			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+			if (!twa_command_mapped(srb)) {
 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
 					scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@
 {
 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
 
-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+	if (!twa_command_mapped(cmd) &&
 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
 		if (scsi_sg_count(cmd) == 1) {
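
The 3w-9xxx fix above introduces one predicate, twa_command_mapped(), and applies it on every completion and error path, so scsi_dma_unmap() is called exactly when a mapping was created at submit time. A hedged sketch of that symmetry pattern (the demo_* names and the threshold are illustrative, not the driver's):

#include <scsi/scsi_cmnd.h>

#define DEMO_MIN_SGL_LENGTH 512		/* illustrative threshold */

/* small single-entry buffers go through an inline copy, not a mapped SGL */
static bool demo_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
	       scsi_bufflen(cmd) >= DEMO_MIN_SGL_LENGTH;
}

/* every completion path must make the same mapped/unmapped decision */
static void demo_complete(struct scsi_cmnd *cmd)
{
	if (demo_command_mapped(cmd))
		scsi_dma_unmap(cmd);
	cmd->scsi_done(cmd);
}
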
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index de6feb8..804806e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -160,7 +160,7 @@
 
 #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
 #define RCV_BUFSIZ_MASK		0x3FFU
-#define MAX_IMM_TX_PKT_LEN	128
+#define MAX_IMM_TX_PKT_LEN	256
 
 static int push_tx_frames(struct cxgbi_sock *, int);
 
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 33c74d3..6bffd91 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -976,13 +976,13 @@
 	wake_up(&conn->ehwait);
 }
 
-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
 {
         struct iscsi_nopout hdr;
 	struct iscsi_task *task;
 
 	if (!rhdr && conn->ping_task)
-		return;
+		return -EINVAL;
 
 	memset(&hdr, 0, sizeof(struct iscsi_nopout));
 	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -996,13 +996,16 @@
 		hdr.ttt = RESERVED_ITT;
 
 	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
-	if (!task)
+	if (!task) {
 		iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
-	else if (!rhdr) {
+		return -EIO;
+	} else if (!rhdr) {
 		/* only track our nops */
 		conn->ping_task = task;
 		conn->last_ping = jiffies;
 	}
+
+	return 0;
 }
 
 static int iscsi_nop_out_rsp(struct iscsi_task *task,
@@ -2092,8 +2095,10 @@
 	if (time_before_eq(last_recv + recv_timeout, jiffies)) {
 		/* send a ping to try to provoke some traffic */
 		ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
-		iscsi_send_nopout(conn, NULL);
-		next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+		if (iscsi_send_nopout(conn, NULL))
+			next_timeout = jiffies + (1 * HZ);
+		else
+			next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
 	} else
 		next_timeout = last_recv + recv_timeout;
 
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index edb044a..0a2168e 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -111,7 +111,7 @@
 
 	dh = __scsi_dh_lookup(name);
 	if (!dh) {
-		request_module(name);
+		request_module("scsi_dh_%s", name);
 		dh = __scsi_dh_lookup(name);
 	}
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbfc599..126a48c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1957,7 +1957,7 @@
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
 	trace_scsi_dispatch_cmd_done(cmd);
-	blk_mq_complete_request(cmd->request);
+	blk_mq_complete_request(cmd->request, cmd->request->errors);
 }
 
 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3cf9faa..a85d863d 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -992,11 +992,12 @@
 		goto free_master;
 	}
 
-	dspi->irq = platform_get_irq(pdev, 0);
-	if (dspi->irq <= 0) {
+	ret = platform_get_irq(pdev, 0);
+	if (ret == 0)
 		ret = -EINVAL;
+	if (ret < 0)
 		goto free_master;
-	}
+	dspi->irq = ret;
 
 	ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
 				dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
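
The spi-davinci hunk demonstrates the recommended handling of platform_get_irq(): keep the raw return value, treat 0 as invalid, and return negative errnos (including -EPROBE_DEFER) unmodified instead of overwriting them with -EINVAL. A minimal sketch under those assumptions, with hypothetical demo_* names:

#include <linux/platform_device.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq == 0)		/* 0 is not a valid IRQ for this device */
		irq = -EINVAL;
	if (irq < 0)		/* keeps -EPROBE_DEFER intact */
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, demo_irq_handler,
					 NULL, 0, dev_name(&pdev->dev), pdev);
}
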
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index f0d22cd..149214b 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -80,6 +80,15 @@
 
 	  If unsure, say N
 
+config SSB_HOST_SOC
+	bool "Support for SSB bus on SoC"
+	depends on SSB
+	help
+	  Host interface for an SSB bus directly mapped into memory. This
+	  is for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
+
+	  If unsure, say N
+
 config SSB_SILENT
 	bool "No SSB kernel messages"
 	depends on SSB && EXPERT
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile
index b1ddc11..64a0968 100644
--- a/drivers/ssb/Makefile
+++ b/drivers/ssb/Makefile
@@ -5,8 +5,9 @@
 
 # host support
 ssb-$(CONFIG_SSB_PCIHOST)		+= pci.o pcihost_wrapper.o
-ssb-$(CONFIG_SSB_PCMCIAHOST)		+= pcmcia.o
+ssb-$(CONFIG_SSB_PCMCIAHOST)		+= pcmcia.o bridge_pcmcia_80211.o
 ssb-$(CONFIG_SSB_SDIOHOST)		+= sdio.o
+ssb-$(CONFIG_SSB_HOST_SOC)		+= host_soc.o
 
 # built-in drivers
 ssb-y					+= driver_chipcommon.o
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
new file mode 100644
index 0000000..d70568e
--- /dev/null
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -0,0 +1,128 @@
+/*
+ * Broadcom 43xx PCMCIA-SSB bridge module
+ *
+ * Copyright (c) 2007 Michael Buesch <m@bues.ch>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ciscode.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cisreg.h>
+
+#include "ssb_private.h"
+
+static const struct pcmcia_device_id ssb_host_pcmcia_tbl[] = {
+	PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
+	PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
+	PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, ssb_host_pcmcia_tbl);
+
+static int ssb_host_pcmcia_probe(struct pcmcia_device *dev)
+{
+	struct ssb_bus *ssb;
+	int err = -ENOMEM;
+	int res = 0;
+
+	ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
+	if (!ssb)
+		goto out_error;
+
+	err = -ENODEV;
+
+	dev->config_flags |= CONF_ENABLE_IRQ;
+
+	dev->resource[2]->flags |=  WIN_ENABLE | WIN_DATA_WIDTH_16 |
+			 WIN_USE_WAIT;
+	dev->resource[2]->start = 0;
+	dev->resource[2]->end = SSB_CORE_SIZE;
+	res = pcmcia_request_window(dev, dev->resource[2], 250);
+	if (res != 0)
+		goto err_kfree_ssb;
+
+	res = pcmcia_map_mem_page(dev, dev->resource[2], 0);
+	if (res != 0)
+		goto err_disable;
+
+	if (!dev->irq)
+		goto err_disable;
+
+	res = pcmcia_enable_device(dev);
+	if (res != 0)
+		goto err_disable;
+
+	err = ssb_bus_pcmciabus_register(ssb, dev, dev->resource[2]->start);
+	if (err)
+		goto err_disable;
+	dev->priv = ssb;
+
+	return 0;
+
+err_disable:
+	pcmcia_disable_device(dev);
+err_kfree_ssb:
+	kfree(ssb);
+out_error:
+	ssb_err("Initialization failed (%d, %d)\n", res, err);
+	return err;
+}
+
+static void ssb_host_pcmcia_remove(struct pcmcia_device *dev)
+{
+	struct ssb_bus *ssb = dev->priv;
+
+	ssb_bus_unregister(ssb);
+	pcmcia_disable_device(dev);
+	kfree(ssb);
+	dev->priv = NULL;
+}
+
+#ifdef CONFIG_PM
+static int ssb_host_pcmcia_suspend(struct pcmcia_device *dev)
+{
+	struct ssb_bus *ssb = dev->priv;
+
+	return ssb_bus_suspend(ssb);
+}
+
+static int ssb_host_pcmcia_resume(struct pcmcia_device *dev)
+{
+	struct ssb_bus *ssb = dev->priv;
+
+	return ssb_bus_resume(ssb);
+}
+#else /* CONFIG_PM */
+# define ssb_host_pcmcia_suspend		NULL
+# define ssb_host_pcmcia_resume		NULL
+#endif /* CONFIG_PM */
+
+static struct pcmcia_driver ssb_host_pcmcia_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "ssb-pcmcia",
+	.id_table	= ssb_host_pcmcia_tbl,
+	.probe		= ssb_host_pcmcia_probe,
+	.remove		= ssb_host_pcmcia_remove,
+	.suspend	= ssb_host_pcmcia_suspend,
+	.resume		= ssb_host_pcmcia_resume,
+};
+
+/*
+ * These are not module init/exit functions!
+ * The module_pcmcia_driver() helper cannot be used here.
+ */
+int ssb_host_pcmcia_init(void)
+{
+	return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+}
+
+void ssb_host_pcmcia_exit(void)
+{
+	pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+}
diff --git a/drivers/ssb/host_soc.c b/drivers/ssb/host_soc.c
new file mode 100644
index 0000000..c809f25
--- /dev/null
+++ b/drivers/ssb/host_soc.c
@@ -0,0 +1,173 @@
+/*
+ * Sonics Silicon Backplane SoC host related functions.
+ * Subsystem core
+ *
+ * Copyright 2005, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/ssb/ssb.h>
+
+#include "ssb_private.h"
+
+static u8 ssb_host_soc_read8(struct ssb_device *dev, u16 offset)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	return readb(bus->mmio + offset);
+}
+
+static u16 ssb_host_soc_read16(struct ssb_device *dev, u16 offset)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	return readw(bus->mmio + offset);
+}
+
+static u32 ssb_host_soc_read32(struct ssb_device *dev, u16 offset)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	return readl(bus->mmio + offset);
+}
+
+#ifdef CONFIG_SSB_BLOCKIO
+static void ssb_host_soc_block_read(struct ssb_device *dev, void *buffer,
+				    size_t count, u16 offset, u8 reg_width)
+{
+	struct ssb_bus *bus = dev->bus;
+	void __iomem *addr;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	addr = bus->mmio + offset;
+
+	switch (reg_width) {
+	case sizeof(u8): {
+		u8 *buf = buffer;
+
+		while (count) {
+			*buf = __raw_readb(addr);
+			buf++;
+			count--;
+		}
+		break;
+	}
+	case sizeof(u16): {
+		__le16 *buf = buffer;
+
+		SSB_WARN_ON(count & 1);
+		while (count) {
+			*buf = (__force __le16)__raw_readw(addr);
+			buf++;
+			count -= 2;
+		}
+		break;
+	}
+	case sizeof(u32): {
+		__le32 *buf = buffer;
+
+		SSB_WARN_ON(count & 3);
+		while (count) {
+			*buf = (__force __le32)__raw_readl(addr);
+			buf++;
+			count -= 4;
+		}
+		break;
+	}
+	default:
+		SSB_WARN_ON(1);
+	}
+}
+#endif /* CONFIG_SSB_BLOCKIO */
+
+static void ssb_host_soc_write8(struct ssb_device *dev, u16 offset, u8 value)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	writeb(value, bus->mmio + offset);
+}
+
+static void ssb_host_soc_write16(struct ssb_device *dev, u16 offset, u16 value)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	writew(value, bus->mmio + offset);
+}
+
+static void ssb_host_soc_write32(struct ssb_device *dev, u16 offset, u32 value)
+{
+	struct ssb_bus *bus = dev->bus;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	writel(value, bus->mmio + offset);
+}
+
+#ifdef CONFIG_SSB_BLOCKIO
+static void ssb_host_soc_block_write(struct ssb_device *dev, const void *buffer,
+				     size_t count, u16 offset, u8 reg_width)
+{
+	struct ssb_bus *bus = dev->bus;
+	void __iomem *addr;
+
+	offset += dev->core_index * SSB_CORE_SIZE;
+	addr = bus->mmio + offset;
+
+	switch (reg_width) {
+	case sizeof(u8): {
+		const u8 *buf = buffer;
+
+		while (count) {
+			__raw_writeb(*buf, addr);
+			buf++;
+			count--;
+		}
+		break;
+	}
+	case sizeof(u16): {
+		const __le16 *buf = buffer;
+
+		SSB_WARN_ON(count & 1);
+		while (count) {
+			__raw_writew((__force u16)(*buf), addr);
+			buf++;
+			count -= 2;
+		}
+		break;
+	}
+	case sizeof(u32): {
+		const __le32 *buf = buffer;
+
+		SSB_WARN_ON(count & 3);
+		while (count) {
+			__raw_writel((__force u32)(*buf), addr);
+			buf++;
+			count -= 4;
+		}
+		break;
+	}
+	default:
+		SSB_WARN_ON(1);
+	}
+}
+#endif /* CONFIG_SSB_BLOCKIO */
+
+/* Ops for the plain SSB bus without a host-device (no PCI or PCMCIA). */
+const struct ssb_bus_ops ssb_host_soc_ops = {
+	.read8		= ssb_host_soc_read8,
+	.read16		= ssb_host_soc_read16,
+	.read32		= ssb_host_soc_read32,
+	.write8		= ssb_host_soc_write8,
+	.write16	= ssb_host_soc_write16,
+	.write32	= ssb_host_soc_write32,
+#ifdef CONFIG_SSB_BLOCKIO
+	.block_read	= ssb_host_soc_block_read,
+	.block_write	= ssb_host_soc_block_write,
+#endif
+};
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index a48a743..5d1e9a0 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -596,166 +596,6 @@
 	return err;
 }
 
-static u8 ssb_ssb_read8(struct ssb_device *dev, u16 offset)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	return readb(bus->mmio + offset);
-}
-
-static u16 ssb_ssb_read16(struct ssb_device *dev, u16 offset)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	return readw(bus->mmio + offset);
-}
-
-static u32 ssb_ssb_read32(struct ssb_device *dev, u16 offset)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	return readl(bus->mmio + offset);
-}
-
-#ifdef CONFIG_SSB_BLOCKIO
-static void ssb_ssb_block_read(struct ssb_device *dev, void *buffer,
-			       size_t count, u16 offset, u8 reg_width)
-{
-	struct ssb_bus *bus = dev->bus;
-	void __iomem *addr;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	addr = bus->mmio + offset;
-
-	switch (reg_width) {
-	case sizeof(u8): {
-		u8 *buf = buffer;
-
-		while (count) {
-			*buf = __raw_readb(addr);
-			buf++;
-			count--;
-		}
-		break;
-	}
-	case sizeof(u16): {
-		__le16 *buf = buffer;
-
-		SSB_WARN_ON(count & 1);
-		while (count) {
-			*buf = (__force __le16)__raw_readw(addr);
-			buf++;
-			count -= 2;
-		}
-		break;
-	}
-	case sizeof(u32): {
-		__le32 *buf = buffer;
-
-		SSB_WARN_ON(count & 3);
-		while (count) {
-			*buf = (__force __le32)__raw_readl(addr);
-			buf++;
-			count -= 4;
-		}
-		break;
-	}
-	default:
-		SSB_WARN_ON(1);
-	}
-}
-#endif /* CONFIG_SSB_BLOCKIO */
-
-static void ssb_ssb_write8(struct ssb_device *dev, u16 offset, u8 value)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	writeb(value, bus->mmio + offset);
-}
-
-static void ssb_ssb_write16(struct ssb_device *dev, u16 offset, u16 value)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	writew(value, bus->mmio + offset);
-}
-
-static void ssb_ssb_write32(struct ssb_device *dev, u16 offset, u32 value)
-{
-	struct ssb_bus *bus = dev->bus;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	writel(value, bus->mmio + offset);
-}
-
-#ifdef CONFIG_SSB_BLOCKIO
-static void ssb_ssb_block_write(struct ssb_device *dev, const void *buffer,
-				size_t count, u16 offset, u8 reg_width)
-{
-	struct ssb_bus *bus = dev->bus;
-	void __iomem *addr;
-
-	offset += dev->core_index * SSB_CORE_SIZE;
-	addr = bus->mmio + offset;
-
-	switch (reg_width) {
-	case sizeof(u8): {
-		const u8 *buf = buffer;
-
-		while (count) {
-			__raw_writeb(*buf, addr);
-			buf++;
-			count--;
-		}
-		break;
-	}
-	case sizeof(u16): {
-		const __le16 *buf = buffer;
-
-		SSB_WARN_ON(count & 1);
-		while (count) {
-			__raw_writew((__force u16)(*buf), addr);
-			buf++;
-			count -= 2;
-		}
-		break;
-	}
-	case sizeof(u32): {
-		const __le32 *buf = buffer;
-
-		SSB_WARN_ON(count & 3);
-		while (count) {
-			__raw_writel((__force u32)(*buf), addr);
-			buf++;
-			count -= 4;
-		}
-		break;
-	}
-	default:
-		SSB_WARN_ON(1);
-	}
-}
-#endif /* CONFIG_SSB_BLOCKIO */
-
-/* Ops for the plain SSB bus without a host-device (no PCI or PCMCIA). */
-static const struct ssb_bus_ops ssb_ssb_ops = {
-	.read8		= ssb_ssb_read8,
-	.read16		= ssb_ssb_read16,
-	.read32		= ssb_ssb_read32,
-	.write8		= ssb_ssb_write8,
-	.write16	= ssb_ssb_write16,
-	.write32	= ssb_ssb_write32,
-#ifdef CONFIG_SSB_BLOCKIO
-	.block_read	= ssb_ssb_block_read,
-	.block_write	= ssb_ssb_block_write,
-#endif
-};
-
 static int ssb_fetch_invariants(struct ssb_bus *bus,
 				ssb_invariants_func_t get_invariants)
 {
@@ -876,7 +716,6 @@
 
 	return err;
 }
-EXPORT_SYMBOL(ssb_bus_pcibus_register);
 #endif /* CONFIG_SSB_PCIHOST */
 
 #ifdef CONFIG_SSB_PCMCIAHOST
@@ -898,7 +737,6 @@
 
 	return err;
 }
-EXPORT_SYMBOL(ssb_bus_pcmciabus_register);
 #endif /* CONFIG_SSB_PCMCIAHOST */
 
 #ifdef CONFIG_SSB_SDIOHOST
@@ -923,13 +761,14 @@
 EXPORT_SYMBOL(ssb_bus_sdiobus_register);
 #endif /* CONFIG_SSB_PCMCIAHOST */
 
+#ifdef CONFIG_SSB_HOST_SOC
 int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
 			    ssb_invariants_func_t get_invariants)
 {
 	int err;
 
 	bus->bustype = SSB_BUSTYPE_SSB;
-	bus->ops = &ssb_ssb_ops;
+	bus->ops = &ssb_host_soc_ops;
 
 	err = ssb_bus_register(bus, get_invariants, baseaddr);
 	if (!err) {
@@ -939,6 +778,7 @@
 
 	return err;
 }
+#endif
 
 int __ssb_driver_register(struct ssb_driver *drv, struct module *owner)
 {
@@ -1465,6 +1305,12 @@
 		/* don't fail SSB init because of this */
 		err = 0;
 	}
+	err = ssb_host_pcmcia_init();
+	if (err) {
+		ssb_err("PCMCIA host initialization failed\n");
+		/* don't fail SSB init because of this */
+		err = 0;
+	}
 	err = ssb_gige_init();
 	if (err) {
 		ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
@@ -1482,6 +1328,7 @@
 static void __exit ssb_modexit(void)
 {
 	ssb_gige_exit();
+	ssb_host_pcmcia_exit();
 	b43_pci_ssb_bridge_exit();
 	bus_unregister(&ssb_bustype);
 }
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index b413e01..f03422b 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -147,8 +147,7 @@
 	return err;
 }
 
-int ssb_pcmcia_switch_core(struct ssb_bus *bus,
-			   struct ssb_device *dev)
+static int ssb_pcmcia_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
 {
 	int err;
 
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index b2d36f7..2278e43 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -200,7 +200,7 @@
 }
 
 /* host must be already claimed */
-int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
+static int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev)
 {
 	u8 coreidx = dev->core_index;
 	u32 sbaddr;
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index eb507a5..15bfd5c 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -85,8 +85,6 @@
 
 /* pcmcia.c */
 #ifdef CONFIG_SSB_PCMCIAHOST
-extern int ssb_pcmcia_switch_core(struct ssb_bus *bus,
-				  struct ssb_device *dev);
 extern int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
 				     u8 coreidx);
 extern int ssb_pcmcia_switch_segment(struct ssb_bus *bus,
@@ -96,13 +94,10 @@
 extern int ssb_pcmcia_hardware_setup(struct ssb_bus *bus);
 extern void ssb_pcmcia_exit(struct ssb_bus *bus);
 extern int ssb_pcmcia_init(struct ssb_bus *bus);
+extern int ssb_host_pcmcia_init(void);
+extern void ssb_host_pcmcia_exit(void);
 extern const struct ssb_bus_ops ssb_pcmcia_ops;
 #else /* CONFIG_SSB_PCMCIAHOST */
-static inline int ssb_pcmcia_switch_core(struct ssb_bus *bus,
-					 struct ssb_device *dev)
-{
-	return 0;
-}
 static inline int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
 					    u8 coreidx)
 {
@@ -124,6 +119,13 @@
 {
 	return 0;
 }
+static inline int ssb_host_pcmcia_init(void)
+{
+	return 0;
+}
+static inline void ssb_host_pcmcia_exit(void)
+{
+}
 #endif /* CONFIG_SSB_PCMCIAHOST */
 
 /* sdio.c */
@@ -132,9 +134,7 @@
 				     struct ssb_init_invariants *iv);
 
 extern u32 ssb_sdio_scan_read32(struct ssb_bus *bus, u16 offset);
-extern int ssb_sdio_switch_core(struct ssb_bus *bus, struct ssb_device *dev);
 extern int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx);
-extern int ssb_sdio_hardware_setup(struct ssb_bus *bus);
 extern void ssb_sdio_exit(struct ssb_bus *bus);
 extern int ssb_sdio_init(struct ssb_bus *bus);
 
@@ -144,19 +144,10 @@
 {
 	return 0;
 }
-static inline int ssb_sdio_switch_core(struct ssb_bus *bus,
-					 struct ssb_device *dev)
-{
-	return 0;
-}
 static inline int ssb_sdio_scan_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
 {
 	return 0;
 }
-static inline int ssb_sdio_hardware_setup(struct ssb_bus *bus)
-{
-	return 0;
-}
 static inline void ssb_sdio_exit(struct ssb_bus *bus)
 {
 }
@@ -166,6 +157,13 @@
 }
 #endif /* CONFIG_SSB_SDIOHOST */
 
+/**************************************************
+ * host_soc.c
+ **************************************************/
+
+#ifdef CONFIG_SSB_HOST_SOC
+extern const struct ssb_bus_ops ssb_host_soc_ops;
+#endif
 
 /* scan.c */
 extern const char *ssb_core_name(u16 coreid);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 20288fc..8f3ac37 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,5 +5,25 @@
 	- add proper arch dependencies as needed
 	- audit userspace interfaces to make sure they are sane
 
+
+ion/
+ - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
+   interface on top of dma-buf. flush_for_device needs to be added to dma-buf
+   first.
+ - Remove ION_IOC_CUSTOM: currently used for cache flushing for cpu access in
+   some vendor trees. It should be replaced with an ioctl on the dma-buf that
+   exposes the begin/end_cpu_access hooks to userspace.
+ - Clarify the tricks ion plays with explicitly managing coherency behind the
+   dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
+   explicit coherency management mode to flush_for_device, to be used by
+   drivers that want to manage caches themselves; the mode indicates whether
+   cpu caches need flushing.
+ - With those removed there's probably no use for ION_IOC_IMPORT anymore either
+   since ion would just be the central allocator for shared buffers.
+ - Add dt-binding to expose cma regions as ion heaps, with the rule that any
+   such cma regions must already be used by some device for dma. I.e. ion only
+   exposes existing cma regions and doesn't unnecessarily reserve memory when
+   booting a system which doesn't use ion.
+
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 217aa53..6e8d839 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1179,13 +1179,13 @@
 		mutex_unlock(&client->lock);
 		goto end;
 	}
-	mutex_unlock(&client->lock);
 
 	handle = ion_handle_create(client, buffer);
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		mutex_unlock(&client->lock);
 		goto end;
+	}
 
-	mutex_lock(&client->lock);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 32f3a9d..5cafa50 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -76,7 +76,7 @@
 
 	/* Set CS active high */
 	par->spi->mode |= SPI_CS_HIGH;
-	ret = par->spi->master->setup(par->spi);
+	ret = spi_setup(par->spi);
 	if (ret) {
 		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
 		return ret;
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 88fb2c0..8eae6ef 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -169,7 +169,7 @@
 	/* enable SPI interface by having CS and MOSI low during reset */
 	save_mode = par->spi->mode;
 	par->spi->mode |= SPI_CS_HIGH;
-	ret = par->spi->master->setup(par->spi); /* set CS inactive low */
+	ret = spi_setup(par->spi); /* set CS inactive low */
 	if (ret) {
 		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
 		return ret;
@@ -180,7 +180,7 @@
 	par->fbtftops.reset(par);
 	mdelay(1000);
 	par->spi->mode = save_mode;
-	ret = par->spi->master->setup(par->spi);
+	ret = spi_setup(par->spi);
 	if (ret) {
 		dev_err(par->info->device, "Could not restore SPI mode\n");
 		return ret;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 23392eb..7f5fa3d 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1436,15 +1436,11 @@
 
 	/* 9-bit SPI setup */
 	if (par->spi && display->buswidth == 9) {
-		par->spi->bits_per_word = 9;
-		ret = par->spi->master->setup(par->spi);
-		if (ret) {
+		if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
+			par->spi->bits_per_word = 9;
+		} else {
 			dev_warn(&par->spi->dev,
 				"9-bit SPI not available, emulating using 8-bit.\n");
-			par->spi->bits_per_word = 8;
-			ret = par->spi->master->setup(par->spi);
-			if (ret)
-				goto out_release;
 			/* allocate buffer with room for dc bits */
 			par->extra = devm_kzalloc(par->info->device,
 				par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c763efc..3f380a0 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -463,15 +463,12 @@
 			}
 			par->fbtftops.write_register = fbtft_write_reg8_bus9;
 			par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
-			sdev->bits_per_word = 9;
-			ret = sdev->master->setup(sdev);
-			if (ret) {
+			if (par->spi->master->bits_per_word_mask
+			    & SPI_BPW_MASK(9)) {
+				par->spi->bits_per_word = 9;
+			} else {
 				dev_warn(dev,
 					"9-bit SPI not available, emulating using 8-bit.\n");
-				sdev->bits_per_word = 8;
-				ret = sdev->master->setup(sdev);
-				if (ret)
-					goto out_release;
 				/* allocate buffer with room for dc bits */
 				par->extra = devm_kzalloc(par->info->device,
 						par->txbuf.len + (par->txbuf.len / 8) + 8,
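
All four fbtft hunks above replace the same fragile idiom: calling
master->setup() directly bypasses the SPI core's validation, and probing for
9-bit support by attempting a setup and checking for failure is unreliable.
Since the master already advertises its supported word sizes in
bits_per_word_mask, the fixed code queries that mask up front with
SPI_BPW_MASK() and only falls back to 8-bit emulation when 9-bit is genuinely
unsupported. A sketch of the check as the hunks now use it (kernel APIs as
used above, not compilable standalone):

#include <linux/spi/spi.h>

/* Sketch of the capability check; error handling elided. */
static void pick_word_size(struct spi_device *spi)
{
	if (spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
		spi->bits_per_word = 9;	/* native 9-bit transfers */
	} else {
		/* emulate: caller packs the dc bits into an 8-bit stream */
		spi->bits_per_word = 8;
	}
}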
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
index cf0ca50..0676243 100644
--- a/drivers/staging/lustre/README.txt
+++ b/drivers/staging/lustre/README.txt
@@ -14,10 +14,8 @@
 Lustre has independent Metadata and Data servers that clients can access
 in parallel to maximize performance.
 
-In order to use Lustre client you will need to download lustre client
-tools from
-https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/
-the package name is lustre-client.
+In order to use the Lustre client you will need to download the "lustre-client"
+package that contains the userspace tools from http://lustre.org/download/
 
 You will need to install and configure your Lustre servers separately.
 
@@ -76,12 +74,10 @@
 
 More Information
 ================
-You can get more information at
-OpenSFS website: http://lustre.opensfs.org/about/
-Intel HPDD wiki: https://wiki.hpdd.intel.com
+You can get more information at the Lustre website: http://wiki.lustre.org/
 
-Out of tree Lustre client and server code is available at:
-http://git.whamcloud.com/fs/lustre-release.git
+Source for the userspace tools and out-of-tree client and server code
+is available at: http://git.hpdd.intel.com/fs/lustre-release.git
 
 Latest binary packages:
-http://lustre.opensfs.org/download-lustre/
+http://lustre.org/download/
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 769b611..a9bc6e2 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -224,7 +224,7 @@
 
 		prefetchw(&page->flags);
 		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
-					    GFP_KERNEL);
+					    GFP_NOFS);
 		if (ret == 0) {
 			unlock_page(page);
 		} else {
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index d50de03..0b9b9b5 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,5 +1,6 @@
 menuconfig MOST
         tristate "MOST driver"
+	depends on HAS_DMA
         select MOSTCORE
         default n
         ---help---
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 1d4ad1d..fc54876 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -5,6 +5,7 @@
 config HDM_DIM2
 	tristate "DIM2 HDM"
 	depends on AIM_NETWORK
+	depends on HAS_IOMEM
 
 	---help---
 	  Say Y here if you want to connect via MediaLB to a network transceiver.
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index a482c3f..ec15463 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -4,7 +4,7 @@
 
 config HDM_USB
 	tristate "USB HDM"
-	depends on USB
+	depends on USB && NET
 	select AIM_NETWORK
 	---help---
 	  Say Y here if you want to connect via USB to a network transceiver.
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
index 38abf1b..4717254 100644
--- a/drivers/staging/most/mostcore/Kconfig
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -4,6 +4,7 @@
 
 config MOSTCORE
 	tristate "MOST Core"
+	depends on HAS_DMA
 
 	---help---
 	  Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 4299cf4..5e1f16c 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -81,6 +81,7 @@
 	__this_cpu_write(reporting_keystroke, true);
 	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
 	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+	input_sync(virt_keyboard);
 	__this_cpu_write(reporting_keystroke, false);
 
 	/* reenable preemption */
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fa27ee5..fc790e7 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -10,4 +10,3 @@
 visorbus-y += periodic_work.o
 
 ccflags-y += -Idrivers/staging/unisys/include
-ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2309f5f..a272b48 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -37,6 +37,8 @@
 #define POLLJIFFIES_TESTWORK         100
 #define POLLJIFFIES_NORMALCHANNEL     10
 
+static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+
 static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
 static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
 static void fix_vbus_dev_info(struct visor_device *visordev);
@@ -863,6 +865,9 @@
 {
 	int rc = 0;
 
+	if (busreg_rc < 0)
+		return -ENODEV; /* can't register on a nonexistent bus */
+
 	drv->driver.name = drv->name;
 	drv->driver.bus = &visorbus_type;
 	drv->driver.probe = visordriver_probe_device;
@@ -885,6 +890,8 @@
 	if (rc < 0)
 		return rc;
 	rc = register_driver_attributes(drv);
+	if (rc < 0)
+		driver_unregister(&drv->driver);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1260,10 +1267,8 @@
 static int
 create_bus_type(void)
 {
-	int rc = 0;
-
-	rc = bus_register(&visorbus_type);
-	return rc;
+	busreg_rc = bus_register(&visorbus_type);
+	return busreg_rc;
 }
 
 /** Remove the one-and-only one instance of the visor bus type (visorbus_type).
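
The visorbus hunks make two failure paths explicit: create_bus_type() now
latches the bus_register() result in busreg_rc so
visorbus_register_visor_driver() can refuse with -ENODEV instead of
registering a driver against a bus that never came up, and a failed
register_driver_attributes() now unwinds with driver_unregister() rather than
leaking the half-registered driver. A compact, runnable userspace sketch of
the latch-init-status-and-guard pattern, with illustrative names:

#include <stdio.h>

static int bus_status = -1;	/* latched result of bus setup */

static int bus_setup(int ok)
{
	bus_status = ok ? 0 : -1;
	return bus_status;
}

static int driver_register(const char *name)
{
	if (bus_status < 0)
		return -1;	/* can't register on a nonexistent bus */
	printf("registered %s\n", name);
	return 0;
}

int main(void)
{
	driver_register("early");	/* refused: bus not up yet */
	bus_setup(1);
	driver_register("late");	/* succeeds */
	return 0;
}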
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 8c9da7e..9d3c1e2 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1189,16 +1189,16 @@
 	spin_lock_irqsave(&devdata->priv_lock, flags);
 	atomic_dec(&devdata->num_rcvbuf_in_iovm);
 
-	/* update rcv stats - call it with priv_lock held */
-	devdata->net_stats.rx_packets++;
-	devdata->net_stats.rx_bytes = skb->len;
-
 	/* set length to how much was ACTUALLY received -
 	 * NOTE: rcv_done_len includes actual length of data rcvd
 	 * including ethhdr
 	 */
 	skb->len = cmdrsp->net.rcv.rcv_done_len;
 
+	/* update rcv stats - call it with priv_lock held */
+	devdata->net_stats.rx_packets++;
+	devdata->net_stats.rx_bytes += skb->len;
+
 	/* test enabled while holding lock */
 	if (!(devdata->enabled && devdata->enab_dis_acked)) {
 		/* don't process it unless we're in enable mode and until
@@ -1924,13 +1924,16 @@
 			"%s debugfs_create_dir %s failed\n",
 			__func__, netdev->name);
 		err = -ENOMEM;
-		goto cleanup_xmit_cmdrsp;
+		goto cleanup_register_netdev;
 	}
 
 	dev_info(&dev->device, "%s success netdev=%s\n",
 		 __func__, netdev->name);
 	return 0;
 
+cleanup_register_netdev:
+	unregister_netdev(netdev);
+
 cleanup_napi_add:
 	del_timer_sync(&devdata->irq_poll_timer);
 	netif_napi_del(&devdata->napi);
@@ -2128,8 +2131,9 @@
 	if (!dev_num_pool)
 		goto cleanup_workqueue;
 
-	visorbus_register_visor_driver(&visornic_driver);
-	return 0;
+	err = visorbus_register_visor_driver(&visornic_driver);
+	if (!err)
+		return 0;
 
 cleanup_workqueue:
 	if (visornic_timeout_reset_workqueue) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e8a52f7..51d1734 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -407,6 +407,7 @@
 			TYPERANGE_UTF8, USE_INITIAL_ONLY);
 	if (!param)
 		goto out;
+
 	/*
 	 * Extra parameters for ISER from RFC-5046
 	 */
@@ -496,9 +497,9 @@
 		} else if (!strcmp(param->name, SESSIONTYPE)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, IFMARKER)) {
-			SET_PSTATE_NEGOTIATE(param);
+			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, OFMARKER)) {
-			SET_PSTATE_NEGOTIATE(param);
+			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, IFMARKINT)) {
 			SET_PSTATE_REJECT(param);
 		} else if (!strcmp(param->name, OFMARKINT)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index dcc424a..88ea4e4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@
 	struct se_session *se_sess = se_cmd->se_sess;
 	struct se_node_acl *nacl = se_sess->se_node_acl;
 	struct se_dev_entry *deve;
+	sense_reason_t ret = TCM_NO_SENSE;
 
 	rcu_read_lock();
 	deve = target_nacl_find_deve(nacl, unpacked_lun);
 	if (deve) {
 		atomic_long_inc(&deve->total_cmds);
 
-		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
-		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
-			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
-				" Access for 0x%08llx\n",
-				se_cmd->se_tfo->get_fabric_name(),
-				unpacked_lun);
-			rcu_read_unlock();
-			return TCM_WRITE_PROTECTED;
-		}
-
 		if (se_cmd->data_direction == DMA_TO_DEVICE)
 			atomic_long_add(se_cmd->data_length,
 					&deve->write_bytes);
@@ -93,6 +84,17 @@
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+				" Access for 0x%08llx\n",
+				se_cmd->se_tfo->get_fabric_name(),
+				unpacked_lun);
+			rcu_read_unlock();
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	rcu_read_unlock();
 
@@ -109,12 +111,6 @@
 				unpacked_lun);
 			return TCM_NON_EXISTENT_LUN;
 		}
-		/*
-		 * Force WRITE PROTECT for virtual LUN 0
-		 */
-		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
-		    (se_cmd->data_direction != DMA_NONE))
-			return TCM_WRITE_PROTECTED;
 
 		se_lun = se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@
 
 		percpu_ref_get(&se_lun->lun_ref);
 		se_cmd->lun_ref_active = true;
+
+		/*
+		 * Force WRITE PROTECT for virtual LUN 0
+		 */
+		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+		    (se_cmd->data_direction != DMA_NONE)) {
+			ret = TCM_WRITE_PROTECTED;
+			goto ref_dev;
+		}
 	}
 	/*
 	 * RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@
 	 * pointer can be kfree_rcu() by the final se_lun->lun_group put via
 	 * target_core_fabric_configfs.c:target_fabric_port_release
 	 */
+ref_dev:
 	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 	atomic_long_inc(&se_cmd->se_dev->num_cmds);
 
@@ -140,7 +146,7 @@
 		atomic_long_add(se_cmd->data_length,
 				&se_cmd->se_dev->read_bytes);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
@@ -427,8 +433,6 @@
 
 	hlist_del_rcu(&orig->link);
 	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
-	rcu_assign_pointer(orig->se_lun, NULL);
-	rcu_assign_pointer(orig->se_lun_acl, NULL);
 	orig->lun_flags = 0;
 	orig->creation_time = 0;
 	orig->attach_count--;
@@ -439,6 +443,9 @@
 	kref_put(&orig->pr_kref, target_pr_kref_release);
 	wait_for_completion(&orig->pr_comp);
 
+	rcu_assign_pointer(orig->se_lun, NULL);
+	rcu_assign_pointer(orig->se_lun_acl, NULL);
+
 	kfree_rcu(orig, rcu_head);
 
 	core_scsi3_free_pr_reg_from_nacl(dev, nacl);
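
The reshuffling above changes when the write-protect check runs: the percpu
lun_ref is taken first, and a rejected write records TCM_WRITE_PROTECTED and
still falls through to the ref_dev label, so device binding and statistics
happen exactly once and the caller's release path can drop references
unconditionally. Likewise, the deve->se_lun/se_lun_acl pointers are now
cleared only after the pr_kref drop has completed, so code waiting on pr_comp
never observes them as NULL. A minimal sketch of the take-references-first,
single-exit shape (illustrative names, not the target-core API):

struct dev;

enum status { ST_OK = 0, ST_WRITE_PROTECTED };

struct lun { int read_only; struct dev *dev; int refs; };
struct cmd { int is_write; struct dev *dev; int lun_ref_active; };

static enum status lookup_cmd_lun(struct cmd *cmd, struct lun *lun)
{
	enum status ret = ST_OK;

	lun->refs++;			/* caller always drops this */
	cmd->lun_ref_active = 1;

	if (cmd->is_write && lun->read_only)
		ret = ST_WRITE_PROTECTED;	/* reject, refs stay consistent */

	/* single exit: device binding and accounting happen either way */
	cmd->dev = lun->dev;
	return ret;
}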
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 9522960..22390e0 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -187,5 +187,5 @@
 
 bool target_sense_desc_format(struct se_device *dev)
 {
-	return dev->transport->get_blocks(dev) > U32_MAX;
+	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
 }
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a9982f..0f19e11 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -105,6 +105,8 @@
 	mode = FMODE_READ|FMODE_EXCL;
 	if (!ib_dev->ibd_readonly)
 		mode |= FMODE_WRITE;
+	else
+		dev->dev_flags |= DF_READ_ONLY;
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
 	if (IS_ERR(bd)) {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5ab7100..e793311 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -618,7 +618,7 @@
 	struct se_device *dev,
 	struct se_node_acl *nacl,
 	struct se_lun *lun,
-	struct se_dev_entry *deve,
+	struct se_dev_entry *dest_deve,
 	u64 mapped_lun,
 	unsigned char *isid,
 	u64 sa_res_key,
@@ -640,7 +640,29 @@
 	INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
 	atomic_set(&pr_reg->pr_res_holders, 0);
 	pr_reg->pr_reg_nacl = nacl;
-	pr_reg->pr_reg_deve = deve;
+	/*
+	 * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
+	 * the se_dev_entry->pr_ref will already have been obtained by
+	 * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
+	 *
+	 * Otherwise, locate se_dev_entry now and obtain a reference until
+	 * registration completes in __core_scsi3_add_registration().
+	 */
+	if (dest_deve) {
+		pr_reg->pr_reg_deve = dest_deve;
+	} else {
+		rcu_read_lock();
+		pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+		if (!pr_reg->pr_reg_deve) {
+			rcu_read_unlock();
+			pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
+				nacl->initiatorname, mapped_lun);
+			kmem_cache_free(t10_pr_reg_cache, pr_reg);
+			return NULL;
+		}
+		kref_get(&pr_reg->pr_reg_deve->pr_kref);
+		rcu_read_unlock();
+	}
 	pr_reg->pr_res_mapped_lun = mapped_lun;
 	pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
 	pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
@@ -936,17 +958,29 @@
 		    !(strcmp(pr_reg->pr_tport, t_port)) &&
 		     (pr_reg->pr_reg_tpgt == tpgt) &&
 		     (pr_reg->pr_aptpl_target_lun == target_lun)) {
+			/*
+			 * Obtain the ->pr_reg_deve pointer + reference, which
+			 * is released by __core_scsi3_add_registration() below.
+			 */
+			rcu_read_lock();
+			pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+			if (!pr_reg->pr_reg_deve) {
+				pr_err("Unable to locate PR APTPL %s mapped_lun:"
+					" %llu\n", nacl->initiatorname, mapped_lun);
+				rcu_read_unlock();
+				continue;
+			}
+			kref_get(&pr_reg->pr_reg_deve->pr_kref);
+			rcu_read_unlock();
 
 			pr_reg->pr_reg_nacl = nacl;
 			pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
-
 			list_del(&pr_reg->pr_reg_aptpl_list);
 			spin_unlock(&pr_tmpl->aptpl_reg_lock);
 			/*
 			 * At this point all of the pointers in *pr_reg will
 			 * be setup, so go ahead and add the registration.
 			 */
-
 			__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
 			/*
 			 * If this registration is the reservation holder,
@@ -1044,18 +1078,11 @@
 
 	__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
 	spin_unlock(&pr_tmpl->registration_lock);
-
-	rcu_read_lock();
-	deve = pr_reg->pr_reg_deve;
-	if (deve)
-		set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
-	rcu_read_unlock();
-
 	/*
 	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
 	 */
 	if (!pr_reg->pr_reg_all_tg_pt || register_move)
-		return;
+		goto out;
 	/*
 	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
 	 * allocated in __core_scsi3_alloc_registration()
@@ -1075,19 +1102,31 @@
 		__core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
 					       register_type);
 		spin_unlock(&pr_tmpl->registration_lock);
-
+		/*
+		 * Drop configfs group dependency reference and deve->pr_kref
+		 * obtained from __core_scsi3_alloc_registration().
+		 */
 		rcu_read_lock();
 		deve = pr_reg_tmp->pr_reg_deve;
-		if (deve)
+		if (deve) {
 			set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+			core_scsi3_lunacl_undepend_item(deve);
+			pr_reg_tmp->pr_reg_deve = NULL;
+		}
 		rcu_read_unlock();
-
-		/*
-		 * Drop configfs group dependency reference from
-		 * __core_scsi3_alloc_registration()
-		 */
-		core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
 	}
+out:
+	/*
+	 * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
+	 */
+	rcu_read_lock();
+	deve = pr_reg->pr_reg_deve;
+	if (deve) {
+		set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+		kref_put(&deve->pr_kref, target_pr_kref_release);
+		pr_reg->pr_reg_deve = NULL;
+	}
+	rcu_read_unlock();
 }
 
 static int core_scsi3_alloc_registration(
@@ -1785,9 +1824,11 @@
 			dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
 			dest_se_deve->mapped_lun : 0);
 
-		if (!dest_se_deve)
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
 			continue;
-
+		}
 		core_scsi3_lunacl_undepend_item(dest_se_deve);
 		core_scsi3_nodeacl_undepend_item(dest_node_acl);
 		core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1823,9 +1864,11 @@
 
 		kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
 
-		if (!dest_se_deve)
+		if (!dest_se_deve) {
+			kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+				 target_pr_kref_release);
 			continue;
-
+		}
 		core_scsi3_lunacl_undepend_item(dest_se_deve);
 		core_scsi3_nodeacl_undepend_item(dest_node_acl);
 		core_scsi3_tpg_undepend_item(dest_tpg);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2d0381d..5fb9dd7 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -668,7 +668,10 @@
 	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
 	spin_unlock(&dev->se_port_lock);
 
-	lun->lun_access = lun_access;
+	if (dev->dev_flags & DF_READ_ONLY)
+		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+	else
+		lun->lun_access = lun_access;
 	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
 		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
 	mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 7ff9627..e570ff0 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -144,6 +144,16 @@
 		switch_on_temp = 0;
 
 	temperature_threshold = control_temp - switch_on_temp;
+	/*
+	 * estimate_pid_constants() tries to find appropriate default
+	 * values for thermal zones that don't provide them. If a
+	 * system integrator has configured a thermal zone with two
+	 * passive trip points at the same temperature, that person
+	 * hasn't put any effort into setting up the thermal zone
+	 * properly, so just give up.
+	 */
+	if (!temperature_threshold)
+		return;
 
 	if (!tz->tzp->k_po || force)
 		tz->tzp->k_po = int_to_frac(sustainable_power) /
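
The guard matters because temperature_threshold is used as a divisor
immediately below: k_po and the related constants are derived as
int_to_frac(sustainable_power) / temperature_threshold, so two passive trip
points at the same temperature would mean a division by zero. The shape of
the fix in context (excerpted, not compilable standalone):

	/* with switch_on_temp == control_temp the old code divided by 0 */
	s32 temperature_threshold = control_temp - switch_on_temp;

	if (!temperature_threshold)
		return;	/* misconfigured zone: bail out before dividing */

	tz->tzp->k_po = int_to_frac(sustainable_power) / temperature_threshold;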
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index c68fe12..20a41f7 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -643,7 +643,7 @@
 	{
 		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
 		.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
-		.subvendor = 0x2222, .subdevice = 0x1111,
+		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 	},
 	{ 0,}
 };
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 20932cc..b09023b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -343,8 +343,7 @@
 		spin_lock_irqsave(&tty->ctrl_lock, flags);
 		tty->ctrl_status |= TIOCPKT_FLUSHREAD;
 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
-		if (waitqueue_active(&tty->link->read_wait))
-			wake_up_interruptible(&tty->link->read_wait);
+		wake_up_interruptible(&tty->link->read_wait);
 	}
 }
 
@@ -1382,8 +1381,7 @@
 			put_tty_queue(c, ldata);
 			smp_store_release(&ldata->canon_head, ldata->read_head);
 			kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-			if (waitqueue_active(&tty->read_wait))
-				wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+			wake_up_interruptible_poll(&tty->read_wait, POLLIN);
 			return 0;
 		}
 	}
@@ -1667,8 +1665,7 @@
 
 	if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
 		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
-		if (waitqueue_active(&tty->read_wait))
-			wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+		wake_up_interruptible_poll(&tty->read_wait, POLLIN);
 	}
 }
 
@@ -1887,10 +1884,8 @@
 	}
 
 	/* The termios change make the tty ready for I/O */
-	if (waitqueue_active(&tty->write_wait))
-		wake_up_interruptible(&tty->write_wait);
-	if (waitqueue_active(&tty->read_wait))
-		wake_up_interruptible(&tty->read_wait);
+	wake_up_interruptible(&tty->write_wait);
+	wake_up_interruptible(&tty->read_wait);
 }
 
 /**
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 54e6c8d..0bbf340 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -261,6 +261,14 @@
 				  UART_FCR7_64BYTE,
 		.flags		= UART_CAP_FIFO,
 	},
+	[PORT_RT2880] = {
+		.name		= "Palmchip BK-3103",
+		.fifo_size	= 16,
+		.tx_loadsz	= 16,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.rxtrig_bytes	= {1, 4, 8, 14},
+		.flags		= UART_CAP_FIFO,
+	},
 };
 
 /* Uart divisor latch read */
@@ -2910,3 +2918,5 @@
 }
 
 #endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 5ca5cf3..538ea03 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2786,7 +2786,7 @@
 	ret = atmel_init_gpios(port, &pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Failed to initialize GPIOs.");
-		goto err;
+		goto err_clear_bit;
 	}
 
 	ret = atmel_init_port(port, pdev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index fe3d41c..d0388a0 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1631,12 +1631,12 @@
 	int locked = 1;
 	int retval;
 
-	retval = clk_prepare_enable(sport->clk_per);
+	retval = clk_enable(sport->clk_per);
 	if (retval)
 		return;
-	retval = clk_prepare_enable(sport->clk_ipg);
+	retval = clk_enable(sport->clk_ipg);
 	if (retval) {
-		clk_disable_unprepare(sport->clk_per);
+		clk_disable(sport->clk_per);
 		return;
 	}
 
@@ -1675,8 +1675,8 @@
 	if (locked)
 		spin_unlock_irqrestore(&sport->port.lock, flags);
 
-	clk_disable_unprepare(sport->clk_ipg);
-	clk_disable_unprepare(sport->clk_per);
+	clk_disable(sport->clk_ipg);
+	clk_disable(sport->clk_per);
 }
 
 /*
@@ -1777,7 +1777,15 @@
 
 	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
 
-	clk_disable_unprepare(sport->clk_ipg);
+	clk_disable(sport->clk_ipg);
+	if (retval) {
+		clk_unprepare(sport->clk_ipg);
+		goto error_console;
+	}
+
+	retval = clk_prepare(sport->clk_per);
+	if (retval)
+		clk_disable_unprepare(sport->clk_ipg);
 
 error_console:
 	return retval;
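
The imx clk hunks are about context, not just bookkeeping: clk_prepare() and
clk_unprepare() may sleep, while clk_enable()/clk_disable() must be safe in
atomic context, and a console ->write() can run with interrupts off. The fix
therefore keeps the clocks prepared from console setup onward and restricts
the write path to the atomic-safe enable/disable half. A hedged sketch of the
split (kernel clk API as used above; surrounding code elided):

#include <linux/clk.h>

/* Sleepable setup path: do the part that may sleep, once. */
static int console_setup_clks(struct clk *per, struct clk *ipg)
{
	int ret = clk_prepare(ipg);

	if (ret)
		return ret;
	ret = clk_prepare(per);
	if (ret)
		clk_unprepare(ipg);
	return ret;
}

/* Atomic write path: only enable/disable, which never sleep. */
static int console_write_clks_on(struct clk *per, struct clk *ipg)
{
	int ret = clk_enable(per);

	if (ret)
		return ret;
	ret = clk_enable(ipg);
	if (ret)
		clk_disable(per);
	return ret;
}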
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 5a3fa89..a660ab1 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -242,7 +242,10 @@
 	atomic_inc(&buf->priority);
 
 	mutex_lock(&buf->lock);
-	while ((next = buf->head->next) != NULL) {
+	/* paired w/ release in __tty_buffer_request_room; ensures there are
+	 * no pending memory accesses to the freed buffer
+	 */
+	while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
 		tty_buffer_free(port, buf->head);
 		buf->head = next;
 	}
@@ -290,7 +293,10 @@
 		if (n != NULL) {
 			n->flags = flags;
 			buf->tail = n;
-			b->commit = b->used;
+			/* paired w/ acquire in flush_to_ldisc(); ensures
+			 * flush_to_ldisc() sees buffer data.
+			 */
+			smp_store_release(&b->commit, b->used);
 			/* paired w/ acquire in flush_to_ldisc(); ensures the
 			 * latest commit value can be read before the head is
 			 * advanced to the next buffer
@@ -393,7 +399,10 @@
 {
 	struct tty_bufhead *buf = &port->buf;
 
-	buf->tail->commit = buf->tail->used;
+	/* paired w/ acquire in flush_to_ldisc(); ensures
+	 * flush_to_ldisc() sees buffer data.
+	 */
+	smp_store_release(&buf->tail->commit, buf->tail->used);
 	schedule_work(&buf->work);
 }
 EXPORT_SYMBOL(tty_schedule_flip);
@@ -467,7 +476,7 @@
 	struct tty_struct *tty;
 	struct tty_ldisc *disc;
 
-	tty = port->itty;
+	tty = READ_ONCE(port->itty);
 	if (tty == NULL)
 		return;
 
@@ -491,7 +500,10 @@
 		 * is advancing to the next buffer
 		 */
 		next = smp_load_acquire(&head->next);
-		count = head->commit - head->read;
+		/* paired w/ release in __tty_buffer_request_room() or in
+		 * tty_buffer_flush(); ensures we see the committed buffer data
+		 */
+		count = smp_load_acquire(&head->commit) - head->read;
 		if (!count) {
 			if (next == NULL) {
 				check_other_closed(tty);
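
The smp_store_release()/smp_load_acquire() pairs added above implement a
classic single-producer publication protocol: the writer fills the buffer,
then releases the new commit value; a reader that acquires that value is
guaranteed to also see the buffer bytes written before it. The same protocol
in runnable userspace C11, with the tty specifics stripped away:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

/* Minimal release/acquire pairing, modeling commit/read of a tty buffer. */
static char data[64];
static _Atomic int commit;	/* bytes published to the reader */

static void *producer(void *arg)
{
	data[0] = 'x';					/* fill buffer... */
	atomic_store_explicit(&commit, 1,
			      memory_order_release);	/* ...then publish */
	return NULL;
}

static void *consumer(void *arg)
{
	int n;

	while ((n = atomic_load_explicit(&commit,
					 memory_order_acquire)) == 0)
		;					/* spin until published */
	printf("saw %d byte(s): %c\n", n, data[0]);	/* data[0] is visible */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}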
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 02785d8..2eefaa6 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2128,8 +2128,24 @@
 	if (!noctty &&
 	    current->signal->leader &&
 	    !current->signal->tty &&
-	    tty->session == NULL)
-		__proc_set_tty(tty);
+	    tty->session == NULL) {
+		/*
+		 * Don't let a process that only has write access to the tty
+		 * obtain the privileges associated with having a tty as
+		 * controlling terminal (being able to reopen it with full
+		 * access through /dev/tty, being able to perform pushback).
+		 * Many distributions set the group of all ttys to "tty" and
+		 * grant write-only access to all terminals for setgid tty
+		 * binaries, which should not imply full privileges on all ttys.
+		 *
+		 * This could theoretically break old code that performs open()
+		 * on a write-only file descriptor. In that case, it might be
+		 * necessary to also permit this if
+		 * inode_permission(inode, MAY_READ) == 0.
+		 */
+		if (filp->f_mode & FMODE_READ)
+			__proc_set_tty(tty);
+	}
 	spin_unlock_irq(&current->sighand->siglock);
 	read_unlock(&tasklist_lock);
 	tty_unlock(tty);
@@ -2418,7 +2434,7 @@
  *		Takes ->siglock() when updating signal->tty
  */
 
-static int tiocsctty(struct tty_struct *tty, int arg)
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
 {
 	int ret = 0;
 
@@ -2452,6 +2468,13 @@
 			goto unlock;
 		}
 	}
+
+	/* See the comment in tty_open(). */
+	if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+		ret = -EPERM;
+		goto unlock;
+	}
+
 	proc_set_tty(tty);
 unlock:
 	read_unlock(&tasklist_lock);
@@ -2844,7 +2867,7 @@
 		no_tty();
 		return 0;
 	case TIOCSCTTY:
-		return tiocsctty(tty, arg);
+		return tiocsctty(tty, file, arg);
 	case TIOCGPGRP:
 		return tiocgpgrp(tty, real_tty, p);
 	case TIOCSPGRP:
@@ -3151,13 +3174,18 @@
 static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
 		unsigned int index, unsigned int count)
 {
+	int err;
+
 	/* init here, since reused cdevs cause crashes */
 	driver->cdevs[index] = cdev_alloc();
 	if (!driver->cdevs[index])
 		return -ENOMEM;
-	cdev_init(driver->cdevs[index], &tty_fops);
+	driver->cdevs[index]->ops = &tty_fops;
 	driver->cdevs[index]->owner = driver->owner;
-	return cdev_add(driver->cdevs[index], dev, count);
+	err = cdev_add(driver->cdevs[index], dev, count);
+	if (err)
+		kobject_put(&driver->cdevs[index]->kobj);
+	return err;
 }
 
 /**
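
The tty_cdev_add() hunk fixes two related mistakes. Calling cdev_init() on a
cdev that came from cdev_alloc() re-initializes its already-live kobject
(cdev_alloc() sets up a kobject whose release frees the cdev), so the fixed
code only assigns ->ops. And once that kobject exists, a failed cdev_add()
must be unwound with kobject_put(), which frees through the release path,
never with kfree(). The resulting shape, as used above:

	struct cdev *cdev = cdev_alloc();

	if (!cdev)
		return -ENOMEM;
	cdev->ops = &tty_fops;	/* don't cdev_init() an alloc'd cdev */
	cdev->owner = owner;
	err = cdev_add(cdev, dev, count);
	if (err)
		kobject_put(&cdev->kobj);	/* frees via kobject release */
	return err;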
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 867e9f3..dcc50c87 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -61,7 +61,7 @@
 	{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
 	{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
 	{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
-	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
+	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 9eae1a1..4456d2c 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -12,6 +12,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/usb/chipidea.h>
@@ -30,18 +31,36 @@
 	.flags		= CI_HDRC_DISABLE_STREAMING,
 };
 
+static struct ci_hdrc_platform_data ci_zynq_pdata = {
+	.capoffset	= DEF_CAPOFFSET,
+};
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+	{ .compatible = "chipidea,usb2"},
+	{ .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
 static int ci_hdrc_usb2_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ci_hdrc_usb2_priv *priv;
 	struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
 	int ret;
+	const struct of_device_id *match;
 
 	if (!ci_pdata) {
 		ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
 		*ci_pdata = ci_default_pdata;	/* struct copy */
 	}
 
+	match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
+	if (match && match->data) {
+		/* struct copy */
+		*ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+	}
+
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
@@ -96,12 +115,6 @@
 	return 0;
 }
 
-static const struct of_device_id ci_hdrc_usb2_of_match[] = {
-	{ .compatible = "chipidea,usb2" },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
-
 static struct platform_driver ci_hdrc_usb2_driver = {
 	.probe	= ci_hdrc_usb2_probe,
 	.remove	= ci_hdrc_usb2_remove,
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index a637da2..8223fe7 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -656,6 +656,44 @@
 	return 0;
 }
 
+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+	int direction, retval = 0;
+	unsigned long flags;
+
+	if (ep == NULL || hwep->ep.desc == NULL)
+		return -EINVAL;
+
+	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+		return -EOPNOTSUPP;
+
+	spin_lock_irqsave(hwep->lock, flags);
+
+	if (value && hwep->dir == TX && check_transfer &&
+		!list_empty(&hwep->qh.queue) &&
+			!usb_endpoint_xfer_control(hwep->ep.desc)) {
+		spin_unlock_irqrestore(hwep->lock, flags);
+		return -EAGAIN;
+	}
+
+	direction = hwep->dir;
+	do {
+		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+		if (!value)
+			hwep->wedge = 0;
+
+		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+			hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+	} while (hwep->dir != direction);
+
+	spin_unlock_irqrestore(hwep->lock, flags);
+	return retval;
+}
+
 /**
  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
  * @gadget: gadget
@@ -1051,7 +1089,7 @@
 				num += ci->hw_ep_max / 2;
 
 			spin_unlock(&ci->lock);
-			err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
 			spin_lock(&ci->lock);
 			if (!err)
 				isr_setup_status_phase(ci);
@@ -1117,8 +1155,8 @@
 
 	if (err < 0) {
 		spin_unlock(&ci->lock);
-		if (usb_ep_set_halt(&hwep->ep))
-			dev_err(ci->dev, "error: ep_set_halt\n");
+		if (_ep_set_halt(&hwep->ep, 1, false))
+			dev_err(ci->dev, "error: _ep_set_halt\n");
 		spin_lock(&ci->lock);
 	}
 }
@@ -1149,9 +1187,9 @@
 					err = isr_setup_status_phase(ci);
 				if (err < 0) {
 					spin_unlock(&ci->lock);
-					if (usb_ep_set_halt(&hwep->ep))
+					if (_ep_set_halt(&hwep->ep, 1, false))
 						dev_err(ci->dev,
-							"error: ep_set_halt\n");
+						"error: _ep_set_halt\n");
 					spin_lock(&ci->lock);
 				}
 			}
@@ -1397,41 +1435,7 @@
  */
 static int ep_set_halt(struct usb_ep *ep, int value)
 {
-	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
-	int direction, retval = 0;
-	unsigned long flags;
-
-	if (ep == NULL || hwep->ep.desc == NULL)
-		return -EINVAL;
-
-	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
-		return -EOPNOTSUPP;
-
-	spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
-	if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
-	    !list_empty(&hwep->qh.queue)) {
-		spin_unlock_irqrestore(hwep->lock, flags);
-		return -EAGAIN;
-	}
-#endif
-
-	direction = hwep->dir;
-	do {
-		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
-		if (!value)
-			hwep->wedge = 0;
-
-		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
-			hwep->dir = (hwep->dir == TX) ? RX : TX;
-
-	} while (hwep->dir != direction);
-
-	spin_unlock_irqrestore(hwep->lock, flags);
-	return retval;
+	return _ep_set_halt(ep, value, true);
 }
 
 /**
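
Factoring the body into _ep_set_halt(ep, value, check_transfer) lets one
implementation serve two policies: the gadget-facing ep_set_halt() keeps the
safety check that refuses (-EAGAIN) to halt a TX endpoint with transfers
still queued, while internal error paths, which must stall the endpoint no
matter what, pass check_transfer = false. The two call shapes, quoted from
the hunks above:

/* Gadget API: preserve the queued-transfer check. */
static int ep_set_halt(struct usb_ep *ep, int value)
{
	return _ep_set_halt(ep, value, true);
}

/* Internal error path: stall unconditionally. */
if (_ep_set_halt(&hwep->ep, 1, false))
	dev_err(ci->dev, "error: _ep_set_halt\n");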
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b..b9ddf0c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@
 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
 		ep->ss_ep_comp.bmAttributes = 16;
 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
-			desc->bmAttributes > 2) {
+		   USB_SS_MULT(desc->bmAttributes) > 3) {
 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
 				"config %d interface %d altsetting %d ep %d: "
 				"setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@
 	}
 
 	if (usb_endpoint_xfer_isoc(&ep->desc))
-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+		max_tx = (desc->bMaxBurst + 1) *
+			(USB_SS_MULT(desc->bmAttributes)) *
 			usb_endpoint_maxp(&ep->desc);
 	else if (usb_endpoint_xfer_int(&ep->desc))
 		max_tx = usb_endpoint_maxp(&ep->desc) *
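
The subtlety in the config.c hunks is that USB_SS_MULT() already folds in the
spec's +1: it expands to 1 + (bmAttributes & 0x3), i.e. a value of 1-4 rather
than the raw 0-3 field. That is why the sanity check becomes
USB_SS_MULT(...) > 3 and why the isoc bandwidth product drops the extra +1
the old code applied to raw bmAttributes. A runnable check of the arithmetic:

#include <stdio.h>

#define USB_SS_MULT(p)	(1 + ((p) & 0x3))	/* as in ch9.h */

int main(void)
{
	unsigned int bMaxBurst = 1, bmAttributes = 2, maxp = 1024;
	unsigned int max_tx;

	/* isoc: bursts per interval * mult * max packet size */
	max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;
	printf("mult=%u max_tx=%u\n", USB_SS_MULT(bmAttributes), max_tx);
	return 0;
}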
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d85abfe..f5a3819 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -54,6 +54,13 @@
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
 
+	/* Logitech ConferenceCam CC3000e */
+	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+	/* Logitech PTZ Pro Camera */
+	{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
 	/* Logitech Quickcam Fusion */
 	{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -78,6 +85,12 @@
 	/* Philips PSC805 audio device */
 	{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
 
+	/* Plantronic Audio 655 DSP */
+	{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+	/* Plantronic Audio 648 USB */
+	{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Artisman Watchdog Dongle */
 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a5a1b7c..22e9606 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -514,8 +514,6 @@
 		goto err1;
 	}
 
-	dwc3_omap_enable_irqs(omap);
-
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
 		goto err2;
@@ -526,6 +524,8 @@
 		goto err3;
 	}
 
+	dwc3_omap_enable_irqs(omap);
+
 	return 0;
 
 err3:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c25704..1e8bdf8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2665,8 +2665,6 @@
 	int				i;
 	irqreturn_t			ret = IRQ_NONE;
 
-	spin_lock(&dwc->lock);
-
 	for (i = 0; i < dwc->num_event_buffers; i++) {
 		irqreturn_t status;
 
@@ -2675,8 +2673,6 @@
 			ret = status;
 	}
 
-	spin_unlock(&dwc->lock);
-
 	return ret;
 }
 
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 978435a..6399c10 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -186,6 +186,7 @@
 
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
 		ep->claimed = false;
+		ep->driver_data = NULL;
 	}
 	gadget->in_epnum = 0;
 	gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index fdacddb..175ca93 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3138,8 +3138,8 @@
 	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 	if (dev->irq_registered)
 		free_irq(pdev->irq, dev);
-	if (dev->regs)
-		iounmap(dev->regs);
+	if (dev->virt_addr)
+		iounmap(dev->virt_addr);
 	if (dev->mem_region)
 		release_mem_region(pci_resource_start(pdev, 0),
 				pci_resource_len(pdev, 0));
@@ -3226,17 +3226,13 @@
 
 	/* init */
 	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
-	if (!dev) {
-		retval = -ENOMEM;
-		goto finished;
-	}
+	if (!dev)
+		return -ENOMEM;
 
 	/* pci setup */
 	if (pci_enable_device(pdev) < 0) {
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_pcidev;
 	}
 	dev->active = 1;
 
@@ -3246,28 +3242,22 @@
 
 	if (!request_mem_region(resource, len, name)) {
 		dev_dbg(&pdev->dev, "pci device used already\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_memreg;
 	}
 	dev->mem_region = 1;
 
 	dev->virt_addr = ioremap_nocache(resource, len);
 	if (dev->virt_addr == NULL) {
 		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -EFAULT;
-		goto finished;
+		goto err_ioremap;
 	}
 
 	if (!pdev->irq) {
 		dev_err(&pdev->dev, "irq not set\n");
-		kfree(dev);
-		dev = NULL;
 		retval = -ENODEV;
-		goto finished;
+		goto err_irq;
 	}
 
 	spin_lock_init(&dev->lock);
@@ -3283,10 +3273,8 @@
 
 	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
-		kfree(dev);
-		dev = NULL;
 		retval = -EBUSY;
-		goto finished;
+		goto err_irq;
 	}
 	dev->irq_registered = 1;
 
@@ -3314,8 +3302,17 @@
 		return 0;
 
 finished:
-	if (dev)
-		udc_pci_remove(pdev);
+	udc_pci_remove(pdev);
+	return retval;
+
+err_irq:
+	iounmap(dev->virt_addr);
+err_ioremap:
+	release_mem_region(resource, len);
+err_memreg:
+	pci_disable_device(pdev);
+err_pcidev:
+	kfree(dev);
 	return retval;
 }
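
The amd5536udc probe rewrite above converts five copies of "kfree(dev);
dev = NULL; goto finished;" into an ordered goto ladder, where each label
releases exactly the resources acquired before the failure, in reverse order.
A runnable userspace reduction of the pattern (resources are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int probe(void)
{
	int ret;
	void *dev, *region, *mapping;

	dev = malloc(64);		/* step 1 */
	if (!dev)
		return -1;

	region = malloc(64);		/* step 2 */
	if (!region) {
		ret = -2;
		goto err_dev;
	}

	mapping = malloc(64);		/* step 3 */
	if (!mapping) {
		ret = -3;
		goto err_region;
	}

	/* in a real driver, resources are kept for the device's lifetime */
	return 0;

err_region:
	free(region);			/* undo step 2 */
err_dev:
	free(dev);			/* undo step 1 */
	return ret;
}

int main(void)
{
	printf("probe() -> %d\n", probe());
	return 0;
}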
 
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3dfada8..f0f2b06 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2002,6 +2002,17 @@
 		ep->udc = udc;
 		INIT_LIST_HEAD(&ep->queue);
 
+		if (ep->index == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = ep->can_isoc;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
+
 		if (i)
 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5c8f4ef..ccb9c21 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -324,8 +324,7 @@
 				bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
 
 	/* Destroy the dma pools */
-	if (bdc->bd_table_pool)
-		dma_pool_destroy(bdc->bd_table_pool);
+	dma_pool_destroy(bdc->bd_table_pool);
 
 	/* Free the bdc_ep array */
 	kfree(bdc->bdc_ep_array);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index d1b8153..d619950 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -159,8 +159,10 @@
 		bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
 							GFP_ATOMIC,
 							&dma);
-		if (!bd_table->start_bd)
+		if (!bd_table->start_bd) {
+			kfree(bd_table);
 			goto fail;
+		}
 
 		bd_table->dma = dma;
 
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 1379ad4..27af0f0 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1348,6 +1348,7 @@
 {
 	struct dummy		*dum = dum_hcd->dum;
 	struct dummy_request	*req;
+	int			sent = 0;
 
 top:
 	/* if there's no request queued, the device is NAKing; return */
@@ -1385,12 +1386,15 @@
 			if (len == 0)
 				break;
 
-			/* use an extra pass for the final short packet */
-			if (len > ep->ep.maxpacket) {
-				rescan = 1;
-				len -= (len % ep->ep.maxpacket);
+			/* send a multiple of maxpacket first, then the remainder */
+			if (len >= ep->ep.maxpacket) {
+				is_short = 0;
+				if (len % ep->ep.maxpacket)
+					rescan = 1;
+				len -= len % ep->ep.maxpacket;
+			} else {
+				is_short = 1;
 			}
-			is_short = (len % ep->ep.maxpacket) != 0;
 
 			len = dummy_perform_transfer(urb, req, len);
 
@@ -1399,6 +1403,7 @@
 				req->req.status = len;
 			} else {
 				limit -= len;
+				sent += len;
 				urb->actual_length += len;
 				req->req.actual += len;
 			}
@@ -1421,7 +1426,7 @@
 					*status = -EOVERFLOW;
 				else
 					*status = 0;
-			} else if (!to_host) {
+			} else {
 				*status = 0;
 				if (host_len > dev_len)
 					req->req.status = -EOVERFLOW;
@@ -1429,15 +1434,24 @@
 					req->req.status = 0;
 			}
 
-		/* many requests terminate without a short packet */
+		/*
+		 * Many requests terminate without a short packet;
+		 * send a ZLP if the flags demand one.
+		 */
 		} else {
-			if (req->req.length == req->req.actual
-					&& !req->req.zero)
-				req->req.status = 0;
-			if (urb->transfer_buffer_length == urb->actual_length
-					&& !(urb->transfer_flags
-						& URB_ZERO_PACKET))
-				*status = 0;
+			if (req->req.length == req->req.actual) {
+				if (req->req.zero && to_host)
+					rescan = 1;
+				else
+					req->req.status = 0;
+			}
+			if (urb->transfer_buffer_length == urb->actual_length) {
+				if (urb->transfer_flags & URB_ZERO_PACKET &&
+				    !to_host)
+					rescan = 1;
+				else
+					*status = 0;
+			}
 		}
 
 		/* device side completion --> continuable */
@@ -1460,7 +1474,7 @@
 		if (rescan)
 			goto top;
 	}
-	return limit;
+	return sent;
 }
 
 static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
@@ -1890,7 +1904,7 @@
 		default:
 treat_control_like_bulk:
 			ep->last_io = jiffies;
-			total = transfer(dum_hcd, urb, ep, limit, &status);
+			total -= transfer(dum_hcd, urb, ep, limit, &status);
 			break;
 		}
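
Two accounting bugs are fixed in the dummy_hcd hunks: transfer() now returns
the bytes it actually moved (sent) instead of the stale limit, and the caller
subtracts that from total rather than overwriting it. The rescan logic also
becomes explicit: full max-packet multiples go first, and the short tail (or
a ZLP, when req.zero/URB_ZERO_PACKET demands one) follows on a second pass.
A runnable sketch of that split:

#include <stdio.h>

/* Split a transfer into full max-packet chunks plus a short tail, as the
 * rescan logic above does (illustrative, not the HCD code). */
static void split(int len, int maxpacket)
{
	int full = len - len % maxpacket;	/* multiple of maxpacket first */
	int tail = len % maxpacket;		/* short packet, sent on rescan */

	if (full)
		printf("send %d bytes (full packets)\n", full);
	if (tail)
		printf("send %d bytes (short packet)\n", tail);
	else
		printf("no short packet; ZLP only if flags demand one\n");
}

int main(void)
{
	split(100, 64);		/* -> 64 full + 36 short */
	split(128, 64);		/* -> 128 full, no short */
	return 0;
}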
 
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 8aa2593..b9429bc 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2117,8 +2117,7 @@
 		return -EBUSY;
 
 	gr_dfs_delete(dev);
-	if (dev->desc_pool)
-		dma_pool_destroy(dev->desc_pool);
+	dma_pool_destroy(dev->desc_pool);
 	platform_set_drvdata(pdev, NULL);
 
 	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 4c48969..dafe74e 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1767,8 +1767,7 @@
 	usb_del_gadget_udc(&u3d->gadget);
 
 	/* free memory allocated in probe */
-	if (u3d->trb_pool)
-		dma_pool_destroy(u3d->trb_pool);
+	dma_pool_destroy(u3d->trb_pool);
 
 	if (u3d->ep_context)
 		dma_free_coherent(&dev->dev, u3d->ep_context_size,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 339af51..81b6229 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2100,8 +2100,7 @@
 	}
 
 	/* free memory allocated in probe */
-	if (udc->dtd_pool)
-		dma_pool_destroy(udc->dtd_pool);
+	dma_pool_destroy(udc->dtd_pool);
 
 	if (udc->ep_dqh)
 		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
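
The four dma_pool_destroy() hunks above (bdc, gr_udc, mv_u3d and mv_udc) all
drop the same redundant guard: dma_pool_destroy() is NULL-safe, like kfree(),
so wrapping it in "if (pool)" only adds noise. Before and after,
schematically:

	/* before: redundant guard */
	if (udc->dtd_pool)
		dma_pool_destroy(udc->dtd_pool);

	/* after: dma_pool_destroy(NULL) is a no-op */
	dma_pool_destroy(udc->dtd_pool);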
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9a8c936..41f841f 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1498,10 +1498,10 @@
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 *
-	 * xHCI 1.0 specification indicates that the Average TRB Length should
-	 * be set to 8 for control endpoints.
+	 * xHCI 1.0 and 1.1 specifications indicate that the Average TRB Length
+	 * should be set to 8 for control endpoints.
 	 */
-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
 	else
 		ep_ctx->tx_info |=
@@ -1792,8 +1792,7 @@
 	int size;
 	int i, j, num_ports;
 
-	if (timer_pending(&xhci->cmd_timer))
-		del_timer_sync(&xhci->cmd_timer);
+	del_timer_sync(&xhci->cmd_timer);
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2321,6 +2320,10 @@
 
 	INIT_LIST_HEAD(&xhci->cmd_list);
 
+	/* init command timeout timer */
+	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+		    (unsigned long)xhci);
+
 	page_size = readl(&xhci->op_regs->page_size);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"Supported page size register = 0x%x", page_size);
@@ -2505,10 +2508,6 @@
 			"Wrote ERST address to ir_set 0.");
 	xhci_print_ir_set(xhci, 0);
 
-	/* init command timeout timer */
-	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
-		    (unsigned long)xhci);
-
 	/*
 	 * XXX: Might need to set the Interrupter Moderation Register to
 	 * something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5590eac..c79d336 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -180,51 +180,6 @@
 				"QUIRK: Resetting on resume");
 }
 
-/*
- * In some Intel xHCI controllers, in order to get D3 working,
- * through a vendor specific SSIC CONFIG register at offset 0x883c,
- * SSIC PORT need to be marked as "unused" before putting xHCI
- * into D3. After D3 exit, the SSIC port need to be marked as "used".
- * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
- */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
-{
-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
-	u32 val;
-	void __iomem *reg;
-
-	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
-		/* Notify SSIC that SSIC profile programming is not done */
-		val = readl(reg) & ~PROG_DONE;
-		writel(val, reg);
-
-		/* Mark SSIC port as unused(suspend) or used(resume) */
-		val = readl(reg);
-		if (suspend)
-			val |= SSIC_PORT_UNUSED;
-		else
-			val &= ~SSIC_PORT_UNUSED;
-		writel(val, reg);
-
-		/* Notify SSIC that SSIC profile programming is done */
-		val = readl(reg) | PROG_DONE;
-		writel(val, reg);
-		readl(reg);
-	}
-
-	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
-	val = readl(reg);
-	writel(val | BIT(28), reg);
-	readl(reg);
-}
-
 #ifdef CONFIG_ACPI
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
 {
@@ -345,6 +300,51 @@
 }
 
 #ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * SSIC PORT need to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port need to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	u32 val;
+	void __iomem *reg;
+
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+		reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+		/* Notify SSIC that SSIC profile programming is not done */
+		val = readl(reg) & ~PROG_DONE;
+		writel(val, reg);
+
+		/* Mark SSIC port as unused(suspend) or used(resume) */
+		val = readl(reg);
+		if (suspend)
+			val |= SSIC_PORT_UNUSED;
+		else
+			val &= ~SSIC_PORT_UNUSED;
+		writel(val, reg);
+
+		/* Notify SSIC that SSIC profile programming is done */
+		val = readl(reg) | PROG_DONE;
+		writel(val, reg);
+		readl(reg);
+	}
+
+	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+	val = readl(reg);
+	writel(val | BIT(28), reg);
+	readl(reg);
+}
+
 static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47a1e8..43291f9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@
 	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 	if (ret < 0) {
+		/* we are about to kill xhci, give it one more chance */
+		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			      &xhci->op_regs->cmd_ring);
+		udelay(1000);
+		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+		if (ret == 0)
+			return 0;
+
 		xhci_err(xhci, "Stopping the command ring failed, "
 				"maybe the host is dead\n");
 		xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3461,8 +3470,8 @@
 	if (start_cycle == 0)
 		field |= 0x1;
 
-	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-	if (xhci->hci_version == 0x100) {
+	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+	if (xhci->hci_version >= 0x100) {
 		if (urb->transfer_buffer_length > 0) {
 			if (setup->bRequestType & USB_DIR_IN)
 				field |= TRB_TX_TYPE(TRB_DATA_IN);
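
The command-ring hunk above gives the abort one more chance before the driver
declares the host dead: if the first (5 ms) handshake times out, it re-issues
CMD_RING_ABORT, waits 1 ms, and retries with a shorter (3 ms) budget. A
runnable userspace reduction of the bounded-retry handshake; register bits,
budgets, and the "hardware progress" stand-in are illustrative:

#include <stdio.h>

#define RING_RUNNING	0x1
#define RING_ABORT	0x2

/* Poll until RING_RUNNING clears or the budget runs out. */
static int handshake(unsigned int *reg, unsigned int budget)
{
	while (budget--) {
		if (!(*reg & RING_RUNNING))
			return 0;
		*reg &= ~RING_RUNNING;	/* stand-in for hardware progress */
	}
	return -1;
}

static int abort_cmd_ring(unsigned int *reg)
{
	*reg |= RING_ABORT;
	if (!handshake(reg, 5000))
		return 0;
	*reg |= RING_ABORT;		/* one more chance */
	if (!handshake(reg, 3000))
		return 0;
	return -1;			/* host presumed dead */
}

int main(void)
{
	unsigned int reg = RING_RUNNING;

	printf("abort -> %d\n", abort_cmd_ring(&reg));
	return 0;
}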
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6b0f4a4..9957bd9 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -146,7 +146,8 @@
 				"waited %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
 	if (!ret)
-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
+		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
 	return ret;
 }
 
@@ -654,15 +655,6 @@
 }
 EXPORT_SYMBOL_GPL(xhci_run);
 
-static void xhci_only_stop_hcd(struct usb_hcd *hcd)
-{
-	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
-	spin_lock_irq(&xhci->lock);
-	xhci_halt(xhci);
-	spin_unlock_irq(&xhci->lock);
-}
-
 /*
  * Stop xHCI driver.
  *
@@ -677,12 +669,14 @@
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	if (!usb_hcd_is_primary_hcd(hcd)) {
-		xhci_only_stop_hcd(xhci->shared_hcd);
+	if (xhci->xhc_state & XHCI_STATE_HALTED)
 		return;
-	}
 
+	mutex_lock(&xhci->mutex);
 	spin_lock_irq(&xhci->lock);
+	xhci->xhc_state |= XHCI_STATE_HALTED;
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
 	/* Make sure the xHC is halted for a USB3 roothub
 	 * (xhci_stop() could be called as part of failed init).
 	 */
@@ -717,6 +711,7 @@
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
 			"xhci_stop completed - status = %x",
 			readl(&xhci->op_regs->status));
+	mutex_unlock(&xhci->mutex);
 }
 
 /*
@@ -3793,6 +3788,9 @@
 
 	mutex_lock(&xhci->mutex);
 
+	if (xhci->xhc_state)	/* dying or halted */
+		goto out;
+
 	if (!udev->slot_id) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
 				"Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 3ad5d19..23c7948 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -472,7 +472,7 @@
 	if (this_time > max)
 		this_time = max;
 
-	memcpy(data, dev->buf, this_time);
+	memcpy(data, dev->buf + dev->used, this_time);
 
 	dev->used += this_time;
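
The one-line chaoskey fix is a classic partial-read bug: dev->used tracks how
much of dev->buf has already been handed to userspace, but the copy always
started at dev->buf, so every read returned the same leading bytes. The copy
must start at the consumed offset. Schematically, with dev->have standing in
for the buffer's fill level (an illustrative field, not the driver's):

	/* each read consumes from where the previous one stopped */
	size_t this_time = min(max, dev->have - dev->used);

	memcpy(data, dev->buf + dev->used, this_time);	/* not dev->buf */
	dev->used += this_time;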
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 514a6cd..4a518ff 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1051,6 +1051,7 @@
 	 * (c) peripheral initiates, using SRP
 	 */
 	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
 			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
 		musb->is_active = 1;
 	} else {
@@ -2448,6 +2449,9 @@
 	struct musb	*musb = dev_to_musb(dev);
 	unsigned long	flags;
 
+	musb_platform_disable(musb);
+	musb_generic_disable(musb);
+
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2501,6 +2505,9 @@
 	pm_runtime_disable(dev);
 	pm_runtime_set_active(dev);
 	pm_runtime_enable(dev);
+
+	musb_start(musb);
+
 	return 0;
 }
 
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d07cafb..e499b86 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -551,6 +551,9 @@
 	} else {
 		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
 
+		/* delay to let the cppi dma pipeline drain for isoch */
+		udelay(250);
+
 		csr = musb_readw(epio, MUSB_RXCSR);
 		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
 		musb_writew(epio, MUSB_RXCSR, csr);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a0cfead..84512d1 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -225,8 +225,11 @@
 
 	dsps_writel(reg_base, wrp->epintr_set, epmask);
 	dsps_writel(reg_base, wrp->coreintr_set, coremask);
-	/* start polling for ID change. */
-	mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
+	/* start polling for ID change in dual-role idle mode */
+	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+			musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+		mod_timer(&glue->timer, jiffies +
+				msecs_to_jiffies(wrp->poll_timeout));
 	dsps_musb_try_idle(musb, 0);
 }
 
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 39168fe..b2685e7 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -379,6 +379,8 @@
         {}
 };
 
+MODULE_DEVICE_TABLE(of, ux500_match);
+
 static struct platform_driver ux500_driver = {
 	.probe		= ux500_probe,
 	.remove		= ux500_remove,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d3beee..1731324 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -155,7 +155,7 @@
 config USB_QCOM_8X16_PHY
 	tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
 	depends on ARCH_QCOM || COMPILE_TEST
-	depends on RESET_CONTROLLER
+	depends on RESET_CONTROLLER && EXTCON
 	select USB_PHY
 	select USB_ULPI_VIEWPORT
 	help
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index ec6ecd0..5320cb8 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -232,7 +232,8 @@
 		clk_rate = pdata->clk_rate;
 		needs_vcc = pdata->needs_vcc;
 		if (gpio_is_valid(pdata->gpio_reset)) {
-			err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
+			err = devm_gpio_request_one(dev, pdata->gpio_reset,
+						    GPIOF_ACTIVE_LOW,
 						    dev_name(dev));
 			if (!err)
 				nop->gpiod_reset =
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 8a55b37..db68156 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -31,6 +31,7 @@
 	{ "isp1301", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, isp1301_id);
 
 static struct i2c_client *isp1301_i2c_client;
 
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 7b98e1d..d82fa36 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -476,6 +476,11 @@
 		.compatible = "renesas,usbhs-r8a7794",
 		.data = (void *)USBHS_TYPE_RCAR_GEN2,
 	},
+	{
+		/* Gen3 is compatible with Gen2 */
+		.compatible = "renesas,usbhs-r8a7795",
+		.data = (void *)USBHS_TYPE_RCAR_GEN2,
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -493,7 +498,7 @@
 		return NULL;
 
 	dparam = &info->driver_param;
-	dparam->type = of_id ? (u32)of_id->data : 0;
+	dparam->type = of_id ? (uintptr_t)of_id->data : 0;
 	if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
 		dparam->buswait_bwait = tmp;
 	gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
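
The (u32)of_id->data to (uintptr_t)of_id->data change above fixes a pointer-to-integer truncation: the match-table .data field smuggles a small enum through a void *, and on 64-bit builds casting that pointer straight to u32 throws away the upper half and triggers a pointer-to-int-cast warning. A standalone sketch of the round-trip, with stand-in names:

#include <stdint.h>
#include <stdio.h>

enum { USBHS_TYPE_RCAR_GEN2 = 1 };	/* stand-in for the driver's enum */

int main(void)
{
	/* Match tables store a small integer disguised as a pointer. */
	const void *data = (const void *)(uintptr_t)USBHS_TYPE_RCAR_GEN2;

	/* (uint32_t)data would truncate on LP64 and warn; going through
	 * uintptr_t keeps the full pointer width, after which narrowing
	 * to the enum's actual range is explicit and lossless. */
	unsigned int type = (unsigned int)(uintptr_t)data;

	printf("type = %u\n", type);
	return 0;
}
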
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6d1941a..6956c4f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@
 #define ZTE_PRODUCT_MF622			0x0001
 #define ZTE_PRODUCT_MF628			0x0015
 #define ZTE_PRODUCT_MF626			0x0031
+#define ZTE_PRODUCT_ZM8620_X			0x0396
+#define ZTE_PRODUCT_ME3620_MBIM			0x0426
+#define ZTE_PRODUCT_ME3620_X			0x1432
+#define ZTE_PRODUCT_ME3620_L			0x1433
 #define ZTE_PRODUCT_AC2726			0xfff1
 #define ZTE_PRODUCT_MG880			0xfffd
 #define ZTE_PRODUCT_CDMA_TECH			0xfffe
@@ -544,6 +548,18 @@
 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
 };
 
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+	.reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+	.reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
 static const struct option_blacklist_info huawei_cdc12_blacklist = {
 	.reserved = BIT(1) | BIT(2),
 };
@@ -1591,6 +1607,14 @@
 	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
 	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 6c3734d..d3ea90b 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,6 +80,8 @@
 static int  whiteheat_firmware_attach(struct usb_serial *serial);
 
 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id);
 static int  whiteheat_attach(struct usb_serial *serial);
 static void whiteheat_release(struct usb_serial *serial);
 static int  whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@
 	.description =		"Connect Tech - WhiteHEAT",
 	.id_table =		id_table_std,
 	.num_ports =		4,
+	.probe =		whiteheat_probe,
 	.attach =		whiteheat_attach,
 	.release =		whiteheat_release,
 	.port_probe =		whiteheat_port_probe,
@@ -217,6 +220,34 @@
 /*****************************************************************************
  * Connect Tech's White Heat serial driver functions
  *****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+				const struct usb_device_id *id)
+{
+	struct usb_host_interface *iface_desc;
+	struct usb_endpoint_descriptor *endpoint;
+	size_t num_bulk_in = 0;
+	size_t num_bulk_out = 0;
+	size_t min_num_bulk;
+	unsigned int i;
+
+	iface_desc = serial->interface->cur_altsetting;
+
+	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+		endpoint = &iface_desc->endpoint[i].desc;
+		if (usb_endpoint_is_bulk_in(endpoint))
+			++num_bulk_in;
+		if (usb_endpoint_is_bulk_out(endpoint))
+			++num_bulk_out;
+	}
+
+	min_num_bulk = COMMAND_PORT + 1;
+	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+		return -ENODEV;
+
+	return 0;
+}
+
 static int whiteheat_attach(struct usb_serial *serial)
 {
 	struct usb_serial_port *command_port;
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 0e5fde1..9f9a7be 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -752,7 +752,7 @@
 	if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
 		dev_err(dev, "Invalid waveform\n");
 		err = -EINVAL;
-		goto err_failed;
+		goto err_fw;
 	}
 
 	mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@
 	mutex_unlock(&(par->io_lock));
 	if (err < 0) {
 		dev_err(dev, "Failed to store broadsheet waveform\n");
-		goto err_failed;
+		goto err_fw;
 	}
 
 	dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
 
-	return len;
+	err = len;
 
+err_fw:
+	release_firmware(fw_entry);
 err_failed:
 	return err;
 }
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 7fa2e6f..b335c1a 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1628,9 +1628,16 @@
 static int fsl_diu_resume(struct platform_device *ofdev)
 {
 	struct fsl_diu_data *data;
+	unsigned int i;
 
 	data = dev_get_drvdata(&ofdev->dev);
-	enable_lcdc(data->fsl_diu_info);
+
+	fsl_diu_enable_interrupts(data);
+	update_lcdc(data->fsl_diu_info);
+	for (i = 0; i < NUM_AOIS; i++) {
+		if (data->mfb[i].count)
+			fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+	}
 
 	return 0;
 }
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 9b8bebd..f9ec5c0 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -831,6 +831,7 @@
 	{ .compatible = "fujitsu,coral", },
 	{ /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
 
 static struct platform_driver of_platform_mb862xxfb_driver = {
 	.driver = {
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index a8ce920..d811e6d 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -294,7 +294,7 @@
 
 	adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
 	if (adapter_node) {
-		adapter = of_find_i2c_adapter_by_node(adapter_node);
+		adapter = of_get_i2c_adapter_by_node(adapter_node);
 		if (adapter == NULL) {
 			dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
 			omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 90cbc4c..c581231 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -898,6 +898,7 @@
 	{ .compatible = "omapdss,sony,acx565akm", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
 static struct spi_driver acx565akm_driver = {
 	.driver = {
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 7ed9a22..01b43e9 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -226,7 +226,7 @@
 	writemmr(par, DST1, point(x, y));
 	writemmr(par, DST2, point(x + w - 1, y + h - 1));
 
-	memcpy(par->io_virt + 0x10000, data, 4 * size);
+	iowrite32_rep(par->io_virt + 0x10000, data, size);
 }
 
 static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@
 static inline void set_lwidth(struct tridentfb_par *par, int width)
 {
 	write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
-	write3X4(par, AddColReg,
-		 (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+	/* chips older than TGUI9660 have only 1 width bit in AddColReg */
+	/* touching the other one breaks I2C/DDC */
+	if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+		write3X4(par, AddColReg,
+		     (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+	else
+		write3X4(par, AddColReg,
+		     (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
 }
 
 /* For resolutions smaller than FP resolution stretch */
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 32d8275..8a1076b 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -210,6 +210,7 @@
 			 */
 			pr_err("%s: error in timing %d\n",
 				of_node_full_name(np), disp->num_timings + 1);
+			kfree(dt);
 			goto timingfail;
 		}
 
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c68edc1..79e1aa1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -817,8 +817,9 @@
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
 	select WATCHDOG_CORE
+	depends on I2C || I2C=n
 	select LPC_ICH if !EXPERT
-	select I2C_I801 if !EXPERT
+	select I2C_I801 if !EXPERT && I2C
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 66c3e65..8a5ce5b 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
 #define PM_RSTC_WRCFG_FULL_RESET	0x00000020
 #define PM_RSTC_RESET			0x00000102
 
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread across bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT	0x555
+
 #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
 
@@ -151,8 +158,7 @@
 	 * hard reset.
 	 */
 	val = readl_relaxed(wdt->base + PM_RSTS);
-	val &= PM_RSTC_WRCFG_CLR;
-	val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+	val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
 	writel_relaxed(val, wdt->base + PM_RSTS);
 
 	/* Continue with normal reset mechanism */
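
The new PM_RSTS_RASPBERRYPI_HALT value is easiest to verify by expanding the encoding the comment describes: each of the six partition bits lands on an even bit position, so partition 63 (all bits set) becomes 0b010101010101 = 0x555. A quick standalone check (plain C, not driver code):

#include <stdio.h>

/* Spread a 6-bit partition number across even bits 0,2,4,6,8,10,
 * as the Raspberry Pi firmware expects in PM_RSTS. */
static unsigned int encode_partition(unsigned int part)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < 6; i++)
		val |= ((part >> i) & 1) << (2 * i);
	return val;
}

int main(void)
{
	/* Partition 63 (all six bits set) -> 0x555, the halt marker. */
	printf("0x%x\n", encode_partition(63));
	return 0;
}
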
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc..006e234 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
 
 static struct platform_driver gef_wdt_driver = {
 	.driver = {
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 6901300..098fa9c 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -253,6 +253,7 @@
 	{ .compatible = "men,a021-wdt" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
 
 static struct platform_driver a21_wdt_driver = {
 	.probe = a21_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2..60b0605 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@
 	{ .compatible = "moxa,moxart-watchdog" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
 
 static struct platform_driver moxart_wdt_driver = {
 	.probe      = moxart_wdt_probe,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ecbc63d..9a2ec79 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1828,7 +1828,6 @@
 	int found = 0;
 	struct extent_buffer *eb;
 	struct btrfs_inode_extref *extref;
-	struct extent_buffer *leaf;
 	u32 item_size;
 	u32 cur_offset;
 	unsigned long ptr;
@@ -1856,9 +1855,8 @@
 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 		btrfs_release_path(path);
 
-		leaf = path->nodes[0];
-		item_size = btrfs_item_size_nr(leaf, slot);
-		ptr = btrfs_item_ptr_offset(leaf, slot);
+		item_size = btrfs_item_size_nr(eb, slot);
+		ptr = btrfs_item_ptr_offset(eb, slot);
 		cur_offset = 0;
 
 		while (cur_offset < item_size) {
@@ -1872,7 +1870,7 @@
 			if (ret)
 				break;
 
-			cur_offset += btrfs_inode_extref_name_len(leaf, extref);
+			cur_offset += btrfs_inode_extref_name_len(eb, extref);
 			cur_offset += sizeof(*extref);
 		}
 		btrfs_tree_read_unlock_blocking(eb);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 295795a..1e60d00 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2847,6 +2847,8 @@
 	    !extent_buffer_uptodate(chunk_root->node)) {
 		printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
 		       sb->s_id);
+		if (!IS_ERR(chunk_root->node))
+			free_extent_buffer(chunk_root->node);
 		chunk_root->node = NULL;
 		goto fail_tree_roots;
 	}
@@ -2885,6 +2887,8 @@
 	    !extent_buffer_uptodate(tree_root->node)) {
 		printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
 		       sb->s_id);
+		if (!IS_ERR(tree_root->node))
+			free_extent_buffer(tree_root->node);
 		tree_root->node = NULL;
 		goto recovery_tree_root;
 	}
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 8d05220..2513a7f 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -112,11 +112,11 @@
 	u32 generation;
 
 	if (fh_type == FILEID_BTRFS_WITH_PARENT) {
-		if (fh_len !=  BTRFS_FID_SIZE_CONNECTABLE)
+		if (fh_len <  BTRFS_FID_SIZE_CONNECTABLE)
 			return NULL;
 		root_objectid = fid->root_objectid;
 	} else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
-		if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+		if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
 			return NULL;
 		root_objectid = fid->parent_root_objectid;
 	} else
@@ -136,11 +136,11 @@
 	u32 generation;
 
 	if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
-	     fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
+	     fh_len < BTRFS_FID_SIZE_CONNECTABLE) &&
 	    (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
-	     fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
+	     fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
 	    (fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
-	     fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
+	     fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE))
 		return NULL;
 
 	objectid = fid->objectid;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9f96042..601d7d4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2828,6 +2828,7 @@
 	struct btrfs_delayed_ref_head *head;
 	int ret;
 	int run_all = count == (unsigned long)-1;
+	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
 	/* We'll clean this up in btrfs_cleanup_transaction */
 	if (trans->aborted)
@@ -2844,6 +2845,7 @@
 #ifdef SCRAMBLE_DELAYED_REFS
 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
 #endif
+	trans->can_flush_pending_bgs = false;
 	ret = __btrfs_run_delayed_refs(trans, root, count);
 	if (ret < 0) {
 		btrfs_abort_transaction(trans, root, ret);
@@ -2893,6 +2895,7 @@
 	}
 out:
 	assert_qgroups_uptodate(trans);
+	trans->can_flush_pending_bgs = can_flush_pending_bgs;
 	return 0;
 }
 
@@ -4306,7 +4309,8 @@
 	 * the block groups that were made dirty during the lifetime of the
 	 * transaction.
 	 */
-	if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+	if (trans->can_flush_pending_bgs &&
+	    trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
 		btrfs_create_pending_block_groups(trans, trans->root);
 		btrfs_trans_release_chunk_metadata(trans);
 	}
@@ -9560,7 +9564,9 @@
 	struct btrfs_block_group_item item;
 	struct btrfs_key key;
 	int ret = 0;
+	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
 
+	trans->can_flush_pending_bgs = false;
 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
 		if (ret)
 			goto next;
@@ -9581,6 +9587,7 @@
 next:
 		list_del_init(&block_group->bg_list);
 	}
+	trans->can_flush_pending_bgs = can_flush_pending_bgs;
 }
 
 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e2357e3..3915c94 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3132,12 +3132,12 @@
 					     get_extent_t *get_extent,
 					     struct extent_map **em_cached,
 					     struct bio **bio, int mirror_num,
-					     unsigned long *bio_flags, int rw)
+					     unsigned long *bio_flags, int rw,
+					     u64 *prev_em_start)
 {
 	struct inode *inode;
 	struct btrfs_ordered_extent *ordered;
 	int index;
-	u64 prev_em_start = (u64)-1;
 
 	inode = pages[0]->mapping->host;
 	while (1) {
@@ -3153,7 +3153,7 @@
 
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
-			      mirror_num, bio_flags, rw, &prev_em_start);
+			      mirror_num, bio_flags, rw, prev_em_start);
 		page_cache_release(pages[index]);
 	}
 }
@@ -3163,7 +3163,8 @@
 			       int nr_pages, get_extent_t *get_extent,
 			       struct extent_map **em_cached,
 			       struct bio **bio, int mirror_num,
-			       unsigned long *bio_flags, int rw)
+			       unsigned long *bio_flags, int rw,
+			       u64 *prev_em_start)
 {
 	u64 start = 0;
 	u64 end = 0;
@@ -3184,7 +3185,7 @@
 						  index - first_index, start,
 						  end, get_extent, em_cached,
 						  bio, mirror_num, bio_flags,
-						  rw);
+						  rw, prev_em_start);
 			start = page_start;
 			end = start + PAGE_CACHE_SIZE - 1;
 			first_index = index;
@@ -3195,7 +3196,8 @@
 		__do_contiguous_readpages(tree, &pages[first_index],
 					  index - first_index, start,
 					  end, get_extent, em_cached, bio,
-					  mirror_num, bio_flags, rw);
+					  mirror_num, bio_flags, rw,
+					  prev_em_start);
 }
 
 static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -4207,6 +4209,7 @@
 	struct page *page;
 	struct extent_map *em_cached = NULL;
 	int nr = 0;
+	u64 prev_em_start = (u64)-1;
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		page = list_entry(pages->prev, struct page, lru);
@@ -4223,12 +4226,12 @@
 		if (nr < ARRAY_SIZE(pagepool))
 			continue;
 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-				   &bio, 0, &bio_flags, READ);
+				   &bio, 0, &bio_flags, READ, &prev_em_start);
 		nr = 0;
 	}
 	if (nr)
 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
-				   &bio, 0, &bio_flags, READ);
+				   &bio, 0, &bio_flags, READ, &prev_em_start);
 
 	if (em_cached)
 		free_extent_map(em_cached);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0adf542..3e3e613 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -4639,6 +4639,11 @@
 		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
 	}
 
+	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+		ret = -EINVAL;
+		goto out_bargs;
+	}
+
 do_balance:
 	/*
 	 * Ownership of bctl and mutually_exclusive_operation_running
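
The balance-args hunk above applies the usual reject-unknown-flags rule for ioctl inputs: mask off every bit the kernel understands and fail with -EINVAL if anything is left, so that flags from newer userspace are refused rather than silently ignored. A generic sketch of the idiom, with placeholder flag names standing in for the BTRFS masks:

#include <errno.h>
#include <stdint.h>

#define FLAG_A		(1ULL << 0)
#define FLAG_B		(1ULL << 1)
#define KNOWN_FLAGS	(FLAG_A | FLAG_B)	/* placeholder mask */

static int validate_flags(uint64_t flags)
{
	/* Anything outside the known set comes from newer (or broken)
	 * userspace; reject it loudly instead of ignoring it. */
	if (flags & ~KNOWN_FLAGS)
		return -EINVAL;
	return 0;
}

Calling validate_flags(FLAG_A | (1ULL << 7)) returns -EINVAL, which is exactly the behaviour the ioctl hunk adds for unrecognized balance flags.
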
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index aa72bfd..a739b82 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1920,10 +1920,12 @@
 	/*
 	 * We know that it is or will be overwritten. Check this now.
 	 * The current inode being processed might have been the one that caused
-	 * inode 'ino' to be orphanized, therefore ow_inode can actually be the
-	 * same as sctx->send_progress.
+	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
+	 * the current inode being processed.
 	 */
-	if (ow_inode <= sctx->send_progress)
+	if ((ow_inode < sctx->send_progress) ||
+	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
+	     gen == sctx->cur_inode_gen))
 		ret = 1;
 	else
 		ret = 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 74bc333..a5b0644 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -557,6 +557,7 @@
 	h->delayed_ref_elem.seq = 0;
 	h->type = type;
 	h->allocating_chunk = false;
+	h->can_flush_pending_bgs = true;
 	h->reloc_reserved = false;
 	h->sync = false;
 	INIT_LIST_HEAD(&h->qgroup_ref_list);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 87964bf..a994bb0 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -118,6 +118,7 @@
 	short aborted;
 	short adding_csums;
 	bool allocating_chunk;
+	bool can_flush_pending_bgs;
 	bool reloc_reserved;
 	bool sync;
 	unsigned int type;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2ca784a..595279a 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -376,6 +376,14 @@
 #define BTRFS_BALANCE_ARGS_VRANGE	(1ULL << 4)
 #define BTRFS_BALANCE_ARGS_LIMIT	(1ULL << 5)
 
+#define BTRFS_BALANCE_ARGS_MASK			\
+	(BTRFS_BALANCE_ARGS_PROFILES |		\
+	 BTRFS_BALANCE_ARGS_USAGE |		\
+	 BTRFS_BALANCE_ARGS_DEVID |		\
+	 BTRFS_BALANCE_ARGS_DRANGE |		\
+	 BTRFS_BALANCE_ARGS_VRANGE |		\
+	 BTRFS_BALANCE_ARGS_LIMIT)
+
 /*
  * Profile changing flags.  When SOFT is set we won't relocate chunk if
  * it already has the target profile (even though it may be
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index aa0dc25..afa09fc 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -444,6 +444,48 @@
 	return 0;
 }
 
+/* The server has provided av pairs/target info in the type 2 challenge
+ * packet, and we have plucked it out and stored it within the smb session.
+ * We parse that blob here to find the server-supplied timestamp to use
+ * in ntlmv2 authentication (falling back to the local current time on
+ * failure).
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+	unsigned int attrsize;
+	unsigned int type;
+	unsigned int onesize = sizeof(struct ntlmssp2_name);
+	unsigned char *blobptr;
+	unsigned char *blobend;
+	struct ntlmssp2_name *attrptr;
+
+	if (!ses->auth_key.len || !ses->auth_key.response)
+		return 0;
+
+	blobptr = ses->auth_key.response;
+	blobend = blobptr + ses->auth_key.len;
+
+	while (blobptr + onesize < blobend) {
+		attrptr = (struct ntlmssp2_name *) blobptr;
+		type = le16_to_cpu(attrptr->type);
+		if (type == NTLMSSP_AV_EOL)
+			break;
+		blobptr += 2; /* advance attr type */
+		attrsize = le16_to_cpu(attrptr->length);
+		blobptr += 2; /* advance attr size */
+		if (blobptr + attrsize > blobend)
+			break;
+		if (type == NTLMSSP_AV_TIMESTAMP) {
+			if (attrsize == sizeof(u64))
+				return *((__le64 *)blobptr);
+		}
+		blobptr += attrsize; /* advance attr value */
+	}
+
+	return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
 static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
 			    const struct nls_table *nls_cp)
 {
@@ -641,6 +683,7 @@
 	struct ntlmv2_resp *ntlmv2;
 	char ntlmv2_hash[16];
 	unsigned char *tiblob = NULL; /* target info blob */
+	__le64 rsp_timestamp;
 
 	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
 		if (!ses->domainName) {
@@ -659,6 +702,12 @@
 		}
 	}
 
+	/* Must be within 5 minutes of the server (or in range +/-2h
+	 * in case of Mac OS X), so simply carry over server timestamp
+	 * (as Windows 7 does)
+	 */
+	rsp_timestamp = find_timestamp(ses);
+
 	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
 	tilen = ses->auth_key.len;
 	tiblob = ses->auth_key.response;
@@ -675,8 +724,8 @@
 			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
 	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
 	ntlmv2->reserved = 0;
-	/* Must be within 5 minutes of the server */
-	ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+	ntlmv2->time = rsp_timestamp;
+
 	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
 	ntlmv2->reserved2 = 0;
 
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 27aea11..c3cc160 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -136,5 +136,5 @@
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.07"
+#define CIFS_VERSION   "2.08"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e2a6af1..62203c3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3380,6 +3380,7 @@
 	struct page *page, *tpage;
 	unsigned int expected_index;
 	int rc;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	INIT_LIST_HEAD(tmplist);
 
@@ -3392,7 +3393,7 @@
 	 */
 	__set_page_locked(page);
 	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, GFP_KERNEL);
+				      page->index, gfp);
 
 	/* give up if we can't stick it in the cache */
 	if (rc) {
@@ -3418,8 +3419,7 @@
 			break;
 
 		__set_page_locked(page);
-		if (add_to_page_cache_locked(page, mapping, page->index,
-								GFP_KERNEL)) {
+		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
 			__clear_page_locked(page);
 			break;
 		}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f621b44..6b66dd5 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2034,7 +2034,6 @@
 	struct tcon_link *tlink = NULL;
 	struct cifs_tcon *tcon = NULL;
 	struct TCP_Server_Info *server;
-	struct cifs_io_parms io_parms;
 
 	/*
 	 * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@
 			rc = -ENOSYS;
 		cifsFileInfo_put(open_file);
 		cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
-		if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-			unsigned int bytes_written;
-
-			io_parms.netfid = open_file->fid.netfid;
-			io_parms.pid = open_file->pid;
-			io_parms.tcon = tcon;
-			io_parms.offset = 0;
-			io_parms.length = attrs->ia_size;
-			rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
-					  NULL, NULL, 1);
-			cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
-		}
 	} else
 		rc = -EINVAL;
 
@@ -2093,28 +2080,7 @@
 	else
 		rc = -ENOSYS;
 	cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-	if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-		__u16 netfid;
-		int oplock = 0;
 
-		rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
-				   GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
-				   &oplock, NULL, cifs_sb->local_nls,
-				   cifs_remap(cifs_sb));
-		if (rc == 0) {
-			unsigned int bytes_written;
-
-			io_parms.netfid = netfid;
-			io_parms.pid = current->tgid;
-			io_parms.tcon = tcon;
-			io_parms.offset = 0;
-			io_parms.length = attrs->ia_size;
-			rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
-					  NULL,  1);
-			cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
-			CIFSSMBClose(xid, tcon, netfid);
-		}
-	}
 	if (tlink)
 		cifs_put_tlink(tlink);
 
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index df91bcf..18da19f 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -50,9 +50,13 @@
 		break;
 	default:
 		server->echoes = true;
-		server->oplocks = true;
+		if (enable_oplocks) {
+			server->oplocks = true;
+			server->oplock_credits = 1;
+		} else
+			server->oplocks = false;
+
 		server->echo_credits = 1;
-		server->oplock_credits = 1;
 	}
 	server->credits -= server->echo_credits + server->oplock_credits;
 	return 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 070fb2a..597a417 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -46,6 +46,7 @@
 #include "smb2status.h"
 #include "smb2glob.h"
 #include "cifspdu.h"
+#include "cifs_spnego.h"
 
 /*
  *  The following table defines the expected "StructureSize" of SMB2 requests
@@ -486,19 +487,15 @@
 		cifs_dbg(FYI, "missing security blob on negprot\n");
 
 	rc = cifs_enable_signing(server, ses->sign);
-#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
 	if (rc)
 		goto neg_exit;
-	if (blob_length)
+	if (blob_length) {
 		rc = decode_negTokenInit(security_blob, blob_length, server);
-	if (rc == 1)
-		rc = 0;
-	else if (rc == 0) {
-		rc = -EIO;
-		goto neg_exit;
+		if (rc == 1)
+			rc = 0;
+		else if (rc == 0)
+			rc = -EIO;
 	}
-#endif
-
 neg_exit:
 	free_rsp_buf(resp_buftype, rsp);
 	return rc;
@@ -592,7 +589,8 @@
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	struct TCP_Server_Info *server = ses->server;
 	u16 blob_length = 0;
-	char *security_blob;
+	struct key *spnego_key = NULL;
+	char *security_blob = NULL;
 	char *ntlmssp_blob = NULL;
 	bool use_spnego = false; /* else use raw ntlmssp */
 
@@ -620,7 +618,8 @@
 	ses->ntlmssp->sesskey_per_smbsess = true;
 
 	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
-	ses->sectype = RawNTLMSSP;
+	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+		ses->sectype = RawNTLMSSP;
 
 ssetup_ntlmssp_authenticate:
 	if (phase == NtLmChallenge)
@@ -649,7 +648,48 @@
 	iov[0].iov_base = (char *)req;
 	/* 4 for rfc1002 length field and 1 for pad */
 	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
-	if (phase == NtLmNegotiate) {
+
+	if (ses->sectype == Kerberos) {
+#ifdef CONFIG_CIFS_UPCALL
+		struct cifs_spnego_msg *msg;
+
+		spnego_key = cifs_get_spnego_key(ses);
+		if (IS_ERR(spnego_key)) {
+			rc = PTR_ERR(spnego_key);
+			spnego_key = NULL;
+			goto ssetup_exit;
+		}
+
+		msg = spnego_key->payload.data;
+		/*
+		 * check version field to make sure that cifs.upcall is
+		 * sending us a response in an expected form
+		 */
+		if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+			cifs_dbg(VFS,
+				  "bad cifs.upcall version. Expected %d got %d",
+				  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+			rc = -EKEYREJECTED;
+			goto ssetup_exit;
+		}
+		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+						 GFP_KERNEL);
+		if (!ses->auth_key.response) {
+			cifs_dbg(VFS,
+				"Kerberos can't allocate (%u bytes) memory",
+				msg->sesskey_len);
+			rc = -ENOMEM;
+			goto ssetup_exit;
+		}
+		ses->auth_key.len = msg->sesskey_len;
+		blob_length = msg->secblob_len;
+		iov[1].iov_base = msg->data + msg->sesskey_len;
+		iov[1].iov_len = blob_length;
+#else
+		rc = -EOPNOTSUPP;
+		goto ssetup_exit;
+#endif /* CONFIG_CIFS_UPCALL */
+	} else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
 		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
 				       GFP_KERNEL);
 		if (ntlmssp_blob == NULL) {
@@ -672,6 +712,8 @@
 			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else if (phase == NtLmAuthenticate) {
 		req->hdr.SessionId = ses->Suid;
 		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
@@ -699,6 +741,8 @@
 		} else {
 			security_blob = ntlmssp_blob;
 		}
+		iov[1].iov_base = security_blob;
+		iov[1].iov_len = blob_length;
 	} else {
 		cifs_dbg(VFS, "illegal ntlmssp phase\n");
 		rc = -EIO;
@@ -710,8 +754,6 @@
 				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
 					    1 /* pad */ - 4 /* rfc1001 len */);
 	req->SecurityBufferLength = cpu_to_le16(blob_length);
-	iov[1].iov_base = security_blob;
-	iov[1].iov_len = blob_length;
 
 	inc_rfc1001_len(req, blob_length - 1 /* pad */);
 
@@ -722,6 +764,7 @@
 
 	kfree(security_blob);
 	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
+	ses->Suid = rsp->hdr.SessionId;
 	if (resp_buftype != CIFS_NO_BUFFER &&
 	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
 		if (phase != NtLmNegotiate) {
@@ -739,7 +782,6 @@
 		/* NTLMSSP Negotiate sent now processing challenge (response) */
 		phase = NtLmChallenge; /* process ntlmssp challenge */
 		rc = 0; /* MORE_PROCESSING is not an error here but expected */
-		ses->Suid = rsp->hdr.SessionId;
 		rc = decode_ntlmssp_challenge(rsp->Buffer,
 				le16_to_cpu(rsp->SecurityBufferLength), ses);
 	}
@@ -796,6 +838,10 @@
 		kfree(ses->auth_key.response);
 		ses->auth_key.response = NULL;
 	}
+	if (spnego_key) {
+		key_invalidate(spnego_key);
+		key_put(spnego_key);
+	}
 	kfree(ses->ntlmssp);
 
 	return rc;
@@ -876,6 +922,12 @@
 	if (tcon && tcon->bad_network_name)
 		return -ENOENT;
 
+	if ((tcon && tcon->seal) &&
+	    ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
+		cifs_dbg(VFS, "encryption requested but no server support");
+		return -EOPNOTSUPP;
+	}
+
 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
 	if (unc_path == NULL)
 		return -ENOMEM;
@@ -955,6 +1007,8 @@
 	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
 		cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
 	init_copy_chunk_defaults(tcon);
+	if (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA)
+		cifs_dbg(VFS, "Encrypted shares not supported");
 	if (tcon->ses->server->ops->validate_negotiate)
 		rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
 tcon_exit:
diff --git a/fs/dax.c b/fs/dax.c
index 7ae6df7..a86d3cc 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -285,6 +285,7 @@
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 	void __pmem *addr;
@@ -292,6 +293,8 @@
 	pgoff_t size;
 	int error;
 
+	i_mmap_lock_read(mapping);
+
 	/*
 	 * Check truncate didn't happen while we were allocating a block.
 	 * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@
 	error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	return error;
 }
 
@@ -382,17 +387,15 @@
 			 * from a read fault and we've raced with a truncate
 			 */
 			error = -EIO;
-			goto unlock;
+			goto unlock_page;
 		}
-	} else {
-		i_mmap_lock_write(mapping);
 	}
 
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock;
+		goto unlock_page;
 
 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@
 			if (!error && (bh.b_size < PAGE_SIZE))
 				error = -EIO;
 			if (error)
-				goto unlock;
+				goto unlock_page;
 		} else {
-			i_mmap_unlock_write(mapping);
 			return dax_load_hole(mapping, page, vmf);
 		}
 	}
@@ -417,15 +419,17 @@
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock;
+			goto unlock_page;
 		vmf->page = page;
 		if (!page) {
+			i_mmap_lock_read(mapping);
 			/* Check we didn't race with truncate */
 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 								PAGE_SHIFT;
 			if (vmf->pgoff >= size) {
+				i_mmap_unlock_read(mapping);
 				error = -EIO;
-				goto unlock;
+				goto out;
 			}
 		}
 		return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@
 			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 	}
 
-	if (!page)
-		i_mmap_unlock_write(mapping);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 
- unlock:
+ unlock_page:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
-	} else {
-		i_mmap_unlock_write(mapping);
 	}
-
 	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@
 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
 	bh.b_size = PMD_SIZE;
-	i_mmap_lock_write(mapping);
 	length = get_block(inode, block, &bh, write);
 	if (length)
 		return VM_FAULT_SIGBUS;
+	i_mmap_lock_read(mapping);
 
 	/*
 	 * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,24 +568,14 @@
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
-	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-		int i;
-		for (i = 0; i < PTRS_PER_PMD; i++)
-			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-		wmb_pmem();
-		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-		result |= VM_FAULT_MAJOR;
-	}
-
 	/*
 	 * If we allocated new storage, make sure no process has any
 	 * zero pages covering this hole
 	 */
 	if (buffer_new(&bh)) {
-		i_mmap_unlock_write(mapping);
+		i_mmap_unlock_read(mapping);
 		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-		i_mmap_lock_write(mapping);
+		i_mmap_lock_read(mapping);
 	}
 
 	/*
@@ -633,15 +622,25 @@
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
 
+		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+			int i;
+			for (i = 0; i < PTRS_PER_PMD; i++)
+				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+			wmb_pmem();
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			result |= VM_FAULT_MAJOR;
+		}
+
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	if (buffer_unwritten(&bh))
 		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
-	i_mmap_unlock_write(mapping);
-
 	return result;
 
  fallback:
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 47728da..b46e9fc 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -63,7 +63,7 @@
 	  If unsure, say N.
 
 config EXT4_USE_FOR_EXT2
-	bool "Use ext4 for ext2/ext3 file systems"
+	bool "Use ext4 for ext2 file systems"
 	depends on EXT4_FS
 	depends on EXT2_FS=n
 	default y
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index e26803f..560af04 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -165,8 +165,8 @@
 		if (pages) {
 			page = list_entry(pages->prev, struct page, lru);
 			list_del(&page->lru);
-			if (add_to_page_cache_lru(page, mapping,
-						  page->index, GFP_KERNEL))
+			if (add_to_page_cache_lru(page, mapping, page->index,
+					GFP_KERNEL & mapping_gfp_mask(mapping)))
 				goto next_page;
 		}
 
diff --git a/fs/mpage.c b/fs/mpage.c
index 778a4dd..a7c3427 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -139,7 +139,8 @@
 static struct bio *
 do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 		sector_t *last_block_in_bio, struct buffer_head *map_bh,
-		unsigned long *first_logical_block, get_block_t get_block)
+		unsigned long *first_logical_block, get_block_t get_block,
+		gfp_t gfp)
 {
 	struct inode *inode = page->mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
@@ -277,8 +278,7 @@
 				goto out;
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				min_t(int, nr_pages, BIO_MAX_PAGES),
-				GFP_KERNEL);
+				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
 		if (bio == NULL)
 			goto confused;
 	}
@@ -361,6 +361,7 @@
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
@@ -370,12 +371,13 @@
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+					page->index,
+					gfp)) {
 			bio = do_mpage_readpage(bio, page,
 					nr_pages - page_idx,
 					&last_block_in_bio, &map_bh,
 					&first_logical_block,
-					get_block);
+					get_block, gfp);
 		}
 		page_cache_release(page);
 	}
@@ -395,11 +397,12 @@
 	sector_t last_block_in_bio = 0;
 	struct buffer_head map_bh;
 	unsigned long first_logical_block = 0;
+	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
 
 	map_bh.b_state = 0;
 	map_bh.b_size = 0;
 	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
-			&map_bh, &first_logical_block, get_block);
+			&map_bh, &first_logical_block, get_block, gfp);
 	if (bio)
 		mpage_bio_submit(READ, bio);
 	return 0;
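
The cifs, ext4, mpage and ramfs hunks in this series all switch readahead allocations from a bare GFP_KERNEL to GFP_KERNEL & mapping_gfp_mask(mapping): a filesystem that cleared __GFP_FS on its mapping (to keep reclaim from re-entering the filesystem) gets that restriction honoured instead of overridden. A toy model of the intersection; the bit values are placeholders, not the kernel's real GFP encoding:

#include <stdio.h>

#define GFP_IO		0x1	/* placeholder bits, not kernel values */
#define GFP_FS		0x2
#define GFP_KERNEL	(GFP_IO | GFP_FS)

int main(void)
{
	/* A filesystem cleared GFP_FS on its mapping... */
	unsigned int mapping_gfp = GFP_KERNEL & ~GFP_FS;

	/* ...so readahead must allocate with the intersection,
	 * never re-adding GFP_FS behind the filesystem's back. */
	unsigned int gfp = GFP_KERNEL & mapping_gfp;

	printf("fs recursion allowed: %s\n",
	       (gfp & GFP_FS) ? "yes" : "no");
	return 0;
}
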
diff --git a/fs/namei.c b/fs/namei.c
index 726d211..33e9495 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1558,8 +1558,6 @@
 		negative = d_is_negative(dentry);
 		if (read_seqcount_retry(&dentry->d_seq, seq))
 			return -ECHILD;
-		if (negative)
-			return -ENOENT;
 
 		/*
 		 * This sequence count validates that the parent had no
@@ -1580,6 +1578,12 @@
 				goto unlazy;
 			}
 		}
+		/*
+		 * Note: do the negative dentry check after revalidation,
+		 * in case revalidation drops the dentry.
+		 */
+		if (negative)
+			return -ENOENT;
 		path->mnt = mnt;
 		path->dentry = dentry;
 		if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f93b9cd..5133bb1 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1458,12 +1458,18 @@
 	if (delegation)
 		delegation_flags = delegation->flags;
 	rcu_read_unlock();
-	if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+	switch (data->o_arg.claim) {
+	default:
+		break;
+	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
 		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
 				   "returning a delegation for "
 				   "OPEN(CLAIM_DELEGATE_CUR)\n",
 				   clp->cl_hostname);
-	} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+		return;
+	}
+	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
 		nfs_inode_set_delegation(state->inode,
 					 data->owner->so_cred,
 					 &data->o_res);
@@ -1771,6 +1777,9 @@
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+	write_seqlock(&state->seqlock);
+	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+	write_sequnlock(&state->seqlock);
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
 	switch (type & (FMODE_READ|FMODE_WRITE)) {
 	case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@
 	data->rpc_done = 0;
 	data->rpc_status = 0;
 	data->timestamp = jiffies;
+	if (data->is_recover)
+		nfs4_set_sequence_privileged(&data->c_arg.seq_args);
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5db3246..d854693 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1725,7 +1725,8 @@
 			if (!test_and_clear_bit(ops->owner_flag_bit,
 							&sp->so_flags))
 				continue;
-			atomic_inc(&sp->so_count);
+			if (!atomic_inc_not_zero(&sp->so_count))
+				continue;
 			spin_unlock(&clp->cl_lock);
 			rcu_read_unlock();
 
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 28df12e..671cf68 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -409,7 +409,7 @@
 			__entry->flags = flags;
 			__entry->fmode = (__force unsigned int)ctx->mode;
 			__entry->dev = ctx->dentry->d_sb->s_dev;
-			if (!IS_ERR(state))
+			if (!IS_ERR_OR_NULL(state))
 				inode = state->inode;
 			if (inode != NULL) {
 				__entry->fileid = NFS_FILEID(inode);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 72624dc..75ab762 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -569,19 +569,17 @@
 	if (!nfs_pageio_add_request(pgio, req)) {
 		nfs_redirty_request(req);
 		ret = pgio->pg_error;
-	}
+	} else
+		nfs_add_stats(page_file_mapping(page)->host,
+				NFSIOS_WRITEPAGES, 1);
 out:
 	return ret;
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-	struct inode *inode = page_file_mapping(page)->host;
 	int ret;
 
-	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
 	nfs_pageio_cond_complete(pgio, page_file_index(page));
 	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
 	if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
 	struct nfs_pageio_descriptor pgio;
+	struct inode *inode = page_file_mapping(page)->host;
 	int err;
 
-	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
 				false, &nfs_async_write_completion_ops);
 	err = nfs_do_writepage(page, wbc, &pgio);
 	nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@
 		return 1;
 	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
 		       list_empty_careful(&flctx->flc_posix)))
-		return 0;
+		return 1;
 
 	/* Check to see if there are whole file write locks */
 	ret = 0;
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index cdefaa3..c29d942 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -56,14 +56,6 @@
 	u32 device_generation = 0;
 	int error;
 
-	/*
-	 * We do not attempt to support I/O smaller than the fs block size,
-	 * or not aligned to it.
-	 */
-	if (args->lg_minlength < block_size) {
-		dprintk("pnfsd: I/O too small\n");
-		goto out_layoutunavailable;
-	}
 	if (seg->offset & (block_size - 1)) {
 		dprintk("pnfsd: I/O misaligned\n");
 		goto out_layoutunavailable;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ba1323a..a586467 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -70,6 +70,7 @@
 	unsigned order;
 	void *data;
 	int ret;
+	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
 
 	/* make various checks */
 	order = get_order(newsize);
@@ -84,7 +85,7 @@
 
 	/* allocate enough contiguous pages to be able to satisfy the
 	 * request */
-	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
+	pages = alloc_pages(gfp, order);
 	if (!pages)
 		return -ENOMEM;
 
@@ -108,7 +109,7 @@
 		struct page *page = pages + loop;
 
 		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
-					GFP_KERNEL);
+					gfp);
 		if (ret < 0)
 			goto add_error;
 
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 96f3448..fd65b3f 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -652,11 +652,8 @@
 {
 	int err;
 
-	mutex_lock(&inode->i_mutex);
 	err = security_inode_init_security(inode, dentry, qstr,
 					   &init_xattrs, 0);
-	mutex_unlock(&inode->i_mutex);
-
 	if (err) {
 		struct ubifs_info *c = dentry->i_sb->s_fs_info;
 		ubifs_err(c, "cannot initialize security for inode %lu, error %d",
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 94f9ea8..011dde0 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -1,15 +1,10 @@
 #ifndef _ASM_WORD_AT_A_TIME_H
 #define _ASM_WORD_AT_A_TIME_H
 
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- *	 arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
 
 struct word_at_a_time {
 	const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@
 #define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+	const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+	return mask*0x0001020304050608ul >> 56;
+}
+
+#else	/* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+	/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+	long a = (0x0ff0001+mask) >> 23;
+	/* Fix the 1 for 00 case */
+	return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+	*bits = mask;
+	return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+	bits = (bits - 1) & ~bits;
+	return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+	return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
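
The little-endian half added above mirrors the x86 word-at-a-time code: has_zero() flags any zero byte in a word, create_zero_mask() isolates everything below the first hit, and find_zero() turns that into a byte index via the multiply trick. A 64-bit little-endian sketch of how the pieces compose (REPEAT_BYTE expanded by hand, signatures simplified relative to the header):

#include <stdio.h>

#define ONE_BITS	0x0101010101010101ul
#define HIGH_BITS	0x8080808080808080ul

static unsigned long has_zero(unsigned long a)
{
	return (a - ONE_BITS) & ~a & HIGH_BITS;
}

static unsigned long create_zero_mask(unsigned long bits)
{
	return ((bits - 1) & ~bits) >> 7;
}

/* Jan Achrenius' multiply trick from the patch above. */
static long find_zero(unsigned long mask)
{
	return mask * 0x0001020304050608ul >> 56;
}

int main(void)
{
	/* "abc\0...": on little-endian, byte 3 is the first zero. */
	unsigned long w = 0x0000000000636261ul;
	unsigned long bits = has_zero(w);

	if (bits)
		printf("first zero byte at index %ld\n",
		       find_zero(create_zero_mask(bits)));
	return 0;
}

For w holding "abc" the scan reports index 3, matching a byte-at-a-time strlen.
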
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 2a747a9..3febb4b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -240,5 +240,6 @@
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 499e9f6..0212d13 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -568,6 +568,10 @@
 #define MODE_I2C_READ	4
 #define MODE_I2C_STOP	8
 
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
 #define DP_LINK_STATUS_SIZE	   6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 			  int lane_count);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 86d0b25..5340099 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -253,6 +253,7 @@
 	u8 *bytes;
 };
 
+#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
 struct drm_dp_remote_i2c_read {
 	u8 num_transactions;
 	u8 port_number;
@@ -262,7 +263,7 @@
 		u8 *bytes;
 		u8 no_stop_bit;
 		u8 i2c_transaction_delay;
-	} transactions[4];
+	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
 	u8 read_i2c_device_id;
 	u8 num_bytes_read;
 };
@@ -374,6 +375,7 @@
 struct drm_dp_mst_topology_cbs {
 	/* create a connector for a port */
 	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+	void (*register_connector)(struct drm_connector *connector);
 	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
 				  struct drm_connector *connector);
 	void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 7235c48..43856d1 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -217,6 +217,7 @@
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 2ff4a99..3feb1b2 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -151,6 +151,8 @@
 #define BCMA_CORE_PCIE2			0x83C	/* PCI Express Gen2 */
 #define BCMA_CORE_USB30_DEV		0x83D
 #define BCMA_CORE_ARM_CR4		0x83E
+#define BCMA_CORE_ARM_CA7		0x847
+#define BCMA_CORE_SYS_MEM		0x849
 #define BCMA_CORE_DEFAULT		0xFFF
 
 #define BCMA_MAX_NR_CORES		16
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 37d1602..5e7d43a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -145,7 +145,6 @@
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_TAG_SHARED	= 1 << 1,
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
-	BLK_MQ_F_SYSFS_UP	= 1 << 3,
 	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
-		void *priv);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 		void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 99da9eb..19c2e94 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -456,6 +456,8 @@
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
 	struct bio_set		*bio_split;
+
+	bool			mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f57d7fe..75718fa 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,7 +10,6 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
-#include <linux/perf_event.h>
 
 struct bpf_map;
 
@@ -37,6 +36,8 @@
 	u32 key_size;
 	u32 value_size;
 	u32 max_entries;
+	u32 pages;
+	struct user_struct *user;
 	const struct bpf_map_ops *ops;
 	struct work_struct work;
 };
@@ -101,6 +102,8 @@
 	BPF_WRITE = 2
 };
 
+struct bpf_prog;
+
 struct bpf_verifier_ops {
 	/* return eBPF function prototype for verification */
 	const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
@@ -112,7 +115,7 @@
 
 	u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
 				  int src_reg, int ctx_off,
-				  struct bpf_insn *insn);
+				  struct bpf_insn *insn, struct bpf_prog *prog);
 };
 
 struct bpf_prog_type_list {
@@ -121,14 +124,13 @@
 	enum bpf_prog_type type;
 };
 
-struct bpf_prog;
-
 struct bpf_prog_aux {
 	atomic_t refcnt;
 	u32 used_map_cnt;
 	const struct bpf_verifier_ops *ops;
 	struct bpf_map **used_maps;
 	struct bpf_prog *prog;
+	struct user_struct *user;
 	union {
 		struct work_struct work;
 		struct rcu_head	rcu;
@@ -168,6 +170,8 @@
 struct bpf_map *bpf_map_get(struct fd f);
 void bpf_map_put(struct bpf_map *map);
 
+extern int sysctl_unprivileged_bpf_disabled;
+
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 #else
@@ -190,7 +194,6 @@
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
-extern const struct bpf_func_proto bpf_perf_event_read_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -201,4 +204,8 @@
 extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 697ca77..59f4a73 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -30,6 +30,8 @@
 #define PHY_ID_BCM7439_2		0xae025080
 #define PHY_ID_BCM7445			0x600d8510
 
+#define PHY_ID_BCM_CYGNUS		0xae025200
+
 #define PHY_BCM_OUI_MASK		0xfffffc00
 #define PHY_BCM_OUI_1			0x00206000
 #define PHY_BCM_OUI_2			0x0143bc00
@@ -138,7 +140,10 @@
 
 /* 01010: Auto Power-Down */
 #define BCM54XX_SHD_APD			0x0a
+#define  BCM_APD_CLR_MASK		0xFE9F /* clear bits 5, 6 & 8 */
 #define  BCM54XX_SHD_APD_EN		0x0020
+#define  BCM_NO_ANEG_APD_EN		0x0060 /* bits 5 & 6 */
+#define  BCM_APD_SINGLELP_EN	0x0100 /* Bit 8 */
 
 #define BCM5482_SHD_LEDS1	0x0d	/* 01101: LED Selector 1 */
 					/* LED3 / ~LINKSPD[2] selector */
@@ -209,27 +214,13 @@
 #define MII_BRCM_FET_SHDW_AUXSTAT2	0x1b	/* Auxiliary status 2 */
 #define MII_BRCM_FET_SHDW_AS2_APDE	0x0020	/* Auto power down enable */
 
-/*
- * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
- * 0x1c shadow registers.
- */
-static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
-{
-	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
-	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
-}
-
-static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
-				       u16 val)
-{
-	return phy_write(phydev, MII_BCM54XX_SHD,
-			 MII_BCM54XX_SHD_WRITE |
-			 MII_BCM54XX_SHD_VAL(shadow) |
-			 MII_BCM54XX_SHD_DATA(val));
-}
-
 #define BRCM_CL45VEN_EEE_CONTROL	0x803d
 #define LPI_FEATURE_EN			0x8000
 #define LPI_FEATURE_EN_DIG1000X		0x4000
 
+/* Core register definitions */
+#define MII_BRCM_CORE_BASE1E	0x1E
+#define MII_BRCM_CORE_EXPB0	0xB0
+#define MII_BRCM_CORE_EXPB1	0xB1
+
 #endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 56dcadd..735f9f8 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -78,7 +78,7 @@
 #define get_canfd_dlc(i)	(min_t(__u8, (i), CANFD_MAX_DLC))
 
 /* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline int can_dropped_invalid_skb(struct net_device *dev,
+static inline bool can_dropped_invalid_skb(struct net_device *dev,
 					  struct sk_buff *skb)
 {
 	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
@@ -94,12 +94,12 @@
 	} else
 		goto inval_skb;
 
-	return 0;
+	return false;
 
 inval_skb:
 	kfree_skb(skb);
 	dev->stats.tx_dropped++;
-	return 1;
+	return true;
 }
 
 static inline bool can_is_canfd_skb(const struct sk_buff *skb)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index dfaa7b3..82c159e 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -237,6 +237,10 @@
 #define KASAN_ABI_VERSION 3
 #endif
 
+#if GCC_VERSION >= 50000
+#define CC_HAVE_BUILTIN_OVERFLOW
+#endif
+
 #endif	/* gcc version >= 40000 specific checks */
 
 #if !defined(__noclone)
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 2210254..61d042b 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -202,16 +202,16 @@
 #define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
 #define DCCP_SERVICE_CODE_IS_ABSENT		0
 
-static inline int dccp_list_has_service(const struct dccp_service_list *sl,
+static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
 					const __be32 service)
 {
 	if (likely(sl != NULL)) {
 		u32 i = sl->dccpsl_nr;
 		while (i--)
 			if (sl->dccpsl_list[i] == service)
-				return 1;
+				return true;
 	}
-	return 0;
+	return false;
 }
 
 struct dccp_ackvec;
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fa2cab9..4165e9a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -13,6 +13,7 @@
 #include <linux/printk.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <net/sch_generic.h>
 
 #include <asm/cacheflush.h>
 
@@ -302,10 +303,6 @@
 	bpf_size;						\
 })
 
-/* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) \
-	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
-
 #ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
@@ -326,8 +323,12 @@
 
 struct bpf_prog {
 	u16			pages;		/* Number of allocated pages */
-	bool			jited;		/* Is our filter JIT'ed? */
-	bool			gpl_compatible;	/* Is our filter GPL compatible? */
+	kmemcheck_bitfield_begin(meta);
+	u16			jited:1,	/* Is our filter JIT'ed? */
+				gpl_compatible:1, /* Is filter GPL compatible? */
+				cb_access:1,	/* Is control block accessed? */
+				dst_needed:1;	/* Do we need dst entry? */
+	kmemcheck_bitfield_end(meta);
 	u32			len;		/* Number of filter blocks */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
@@ -349,6 +350,39 @@
 
 #define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+				       struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+	u8 saved_cb[QDISC_CB_PRIV_LEN];
+	u32 res;
+
+	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
+		     QDISC_CB_PRIV_LEN);
+
+	if (unlikely(prog->cb_access)) {
+		memcpy(saved_cb, cb_data, sizeof(saved_cb));
+		memset(cb_data, 0, sizeof(saved_cb));
+	}
+
+	res = BPF_PROG_RUN(prog, skb);
+
+	if (unlikely(prog->cb_access))
+		memcpy(cb_data, saved_cb, sizeof(saved_cb));
+
+	return res;
+}
+
+static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
+					struct sk_buff *skb)
+{
+	u8 *cb_data = qdisc_skb_cb(skb)->data;
+
+	if (unlikely(prog->cb_access))
+		memset(cb_data, 0, QDISC_CB_PRIV_LEN);
+	return BPF_PROG_RUN(prog, skb);
+}
+
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
 	return max(sizeof(struct bpf_prog),
@@ -408,7 +442,7 @@
 
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
-			      bpf_aux_classic_check_t trans);
+			      bpf_aux_classic_check_t trans, bool save_orig);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
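The new cb_access bit above lets callers skip the save/restore of the qdisc
control block unless the verifier actually saw the program touch
__sk_buff->cb. A minimal sketch of how a call site would choose between the
two wrappers (the function and flag names here are illustrative, not taken
from the patch):

static u32 run_skb_prog(const struct bpf_prog *prog, struct sk_buff *skb,
			bool cb_must_survive)
{
	/* If the qdisc cb contents must outlive the program run, save
	 * and restore them around the run; otherwise zeroing before the
	 * run is sufficient. Both wrappers are effectively free unless
	 * prog->cb_access is set.
	 */
	if (cb_must_survive)
		return bpf_prog_run_save_cb(prog, skb);

	return bpf_prog_run_clear_cb(prog, skb);
}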
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 09460d6..a4c61cb 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -8,7 +8,7 @@
 extern void genl_lock(void);
 extern void genl_unlock(void);
 #ifdef CONFIG_LOCKDEP
-extern int lockdep_genl_is_held(void);
+extern bool lockdep_genl_is_held(void);
 #endif
 
 /* for synchronisation between af_netlink and genetlink */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index cfa906f..452c0b0 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -121,7 +121,7 @@
 #define IEEE80211_MAX_SN		IEEE80211_SN_MASK
 #define IEEE80211_SN_MODULO		(IEEE80211_MAX_SN + 1)
 
-static inline int ieee80211_sn_less(u16 sn1, u16 sn2)
+static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
 {
 	return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
 }
@@ -250,7 +250,7 @@
  * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_tods(__le16 fc)
+static inline bool ieee80211_has_tods(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0;
 }
@@ -259,7 +259,7 @@
  * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_fromds(__le16 fc)
+static inline bool ieee80211_has_fromds(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0;
 }
@@ -268,7 +268,7 @@
  * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_a4(__le16 fc)
+static inline bool ieee80211_has_a4(__le16 fc)
 {
 	__le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
 	return (fc & tmp) == tmp;
@@ -278,7 +278,7 @@
  * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_morefrags(__le16 fc)
+static inline bool ieee80211_has_morefrags(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0;
 }
@@ -287,7 +287,7 @@
  * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_retry(__le16 fc)
+static inline bool ieee80211_has_retry(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0;
 }
@@ -296,7 +296,7 @@
  * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_pm(__le16 fc)
+static inline bool ieee80211_has_pm(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0;
 }
@@ -305,7 +305,7 @@
  * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_moredata(__le16 fc)
+static inline bool ieee80211_has_moredata(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0;
 }
@@ -314,7 +314,7 @@
  * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_protected(__le16 fc)
+static inline bool ieee80211_has_protected(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0;
 }
@@ -323,7 +323,7 @@
  * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_has_order(__le16 fc)
+static inline bool ieee80211_has_order(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0;
 }
@@ -332,7 +332,7 @@
  * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_mgmt(__le16 fc)
+static inline bool ieee80211_is_mgmt(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT);
@@ -342,7 +342,7 @@
  * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_ctl(__le16 fc)
+static inline bool ieee80211_is_ctl(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL);
@@ -352,7 +352,7 @@
  * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data(__le16 fc)
+static inline bool ieee80211_is_data(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA);
@@ -362,7 +362,7 @@
  * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data_qos(__le16 fc)
+static inline bool ieee80211_is_data_qos(__le16 fc)
 {
 	/*
 	 * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need
@@ -376,7 +376,7 @@
  * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_data_present(__le16 fc)
+static inline bool ieee80211_is_data_present(__le16 fc)
 {
 	/*
 	 * mask with 0x40 and test that that bit is clear to only return true
@@ -390,7 +390,7 @@
  * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_assoc_req(__le16 fc)
+static inline bool ieee80211_is_assoc_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
@@ -400,7 +400,7 @@
  * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_assoc_resp(__le16 fc)
+static inline bool ieee80211_is_assoc_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP);
@@ -410,7 +410,7 @@
  * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_reassoc_req(__le16 fc)
+static inline bool ieee80211_is_reassoc_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ);
@@ -420,7 +420,7 @@
  * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_reassoc_resp(__le16 fc)
+static inline bool ieee80211_is_reassoc_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP);
@@ -430,7 +430,7 @@
  * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_probe_req(__le16 fc)
+static inline bool ieee80211_is_probe_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ);
@@ -440,7 +440,7 @@
  * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_probe_resp(__le16 fc)
+static inline bool ieee80211_is_probe_resp(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
@@ -450,7 +450,7 @@
  * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_beacon(__le16 fc)
+static inline bool ieee80211_is_beacon(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
@@ -460,7 +460,7 @@
  * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_atim(__le16 fc)
+static inline bool ieee80211_is_atim(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM);
@@ -470,7 +470,7 @@
  * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_disassoc(__le16 fc)
+static inline bool ieee80211_is_disassoc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC);
@@ -480,7 +480,7 @@
  * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_auth(__le16 fc)
+static inline bool ieee80211_is_auth(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
@@ -490,7 +490,7 @@
  * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_deauth(__le16 fc)
+static inline bool ieee80211_is_deauth(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -500,7 +500,7 @@
  * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_action(__le16 fc)
+static inline bool ieee80211_is_action(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
@@ -510,7 +510,7 @@
  * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_back_req(__le16 fc)
+static inline bool ieee80211_is_back_req(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
@@ -520,7 +520,7 @@
  * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_back(__le16 fc)
+static inline bool ieee80211_is_back(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK);
@@ -530,7 +530,7 @@
  * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_pspoll(__le16 fc)
+static inline bool ieee80211_is_pspoll(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
@@ -540,7 +540,7 @@
  * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_rts(__le16 fc)
+static inline bool ieee80211_is_rts(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
@@ -550,7 +550,7 @@
  * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cts(__le16 fc)
+static inline bool ieee80211_is_cts(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
@@ -560,7 +560,7 @@
  * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_ack(__le16 fc)
+static inline bool ieee80211_is_ack(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);
@@ -570,7 +570,7 @@
  * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cfend(__le16 fc)
+static inline bool ieee80211_is_cfend(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND);
@@ -580,7 +580,7 @@
  * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_cfendack(__le16 fc)
+static inline bool ieee80211_is_cfendack(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK);
@@ -590,7 +590,7 @@
  * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_nullfunc(__le16 fc)
+static inline bool ieee80211_is_nullfunc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
@@ -600,7 +600,7 @@
  * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
  * @fc: frame control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_qos_nullfunc(__le16 fc)
+static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
 {
 	return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
 	       cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
@@ -624,7 +624,7 @@
  * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
  * @seq_ctrl: frame sequence control bytes in little-endian byteorder
  */
-static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
 {
 	return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
 }
@@ -1379,6 +1379,7 @@
 
 
 /* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
 #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
 #define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
@@ -1745,8 +1746,7 @@
 	WLAN_EID_TIM = 5,
 	WLAN_EID_IBSS_PARAMS = 6,
 	WLAN_EID_COUNTRY = 7,
-	WLAN_EID_HP_PARAMS = 8,
-	WLAN_EID_HP_TABLE = 9,
+	/* 8, 9 reserved */
 	WLAN_EID_REQUEST = 10,
 	WLAN_EID_QBSS_LOAD = 11,
 	WLAN_EID_EDCA_PARAM_SET = 12,
@@ -1932,6 +1932,8 @@
 	WLAN_CATEGORY_HT = 7,
 	WLAN_CATEGORY_SA_QUERY = 8,
 	WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
+	WLAN_CATEGORY_WNM = 10,
+	WLAN_CATEGORY_WNM_UNPROTECTED = 11,
 	WLAN_CATEGORY_TDLS = 12,
 	WLAN_CATEGORY_MESH_ACTION = 13,
 	WLAN_CATEGORY_MULTIHOP_ACTION = 14,
@@ -2396,7 +2398,10 @@
 		category = ((u8 *) hdr) + 24;
 		return *category != WLAN_CATEGORY_PUBLIC &&
 			*category != WLAN_CATEGORY_HT &&
+			*category != WLAN_CATEGORY_WNM_UNPROTECTED &&
 			*category != WLAN_CATEGORY_SELF_PROTECTED &&
+			*category != WLAN_CATEGORY_UNPROT_DMG &&
+			*category != WLAN_CATEGORY_VHT &&
 			*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
 	}
 
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index db01492..d3e4156 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -25,12 +25,22 @@
 
 #include <linux/types.h>
 #include <linux/random.h>
-#include <asm/byteorder.h>
 
 #define IEEE802154_MTU			127
 #define IEEE802154_ACK_PSDU_LEN		5
 #define IEEE802154_MIN_PSDU_LEN		9
 #define IEEE802154_FCS_LEN		2
+#define IEEE802154_MAX_AUTH_TAG_LEN	16
+
+/*  General MAC frame format:
+ *  2 bytes: Frame Control
+ *  1 byte:  Sequence Number
+ * 20 bytes: Addressing fields
+ * 14 bytes: Auxiliary Security Header
+ */
+#define IEEE802154_MAX_HEADER_LEN	(2 + 1 + 20 + 14)
+#define IEEE802154_MIN_HEADER_LEN	(IEEE802154_ACK_PSDU_LEN - \
+					 IEEE802154_FCS_LEN)
 
 #define IEEE802154_PAN_ID_BROADCAST	0xffff
 #define IEEE802154_ADDR_SHORT_BROADCAST	0xffff
@@ -207,6 +217,7 @@
 
 /* frame control handling */
 #define IEEE802154_FCTL_FTYPE		0x0003
+#define IEEE802154_FCTL_ACKREQ		0x0020
 #define IEEE802154_FCTL_INTRA_PAN	0x0040
 
 #define IEEE802154_FTYPE_DATA		0x0001
@@ -222,6 +233,15 @@
 }
 
 /**
+ * ieee802154_is_ackreq - check if acknowledgment request bit is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_ackreq(__le16 fc)
+{
+	return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ);
+}
+
+/**
  * ieee802154_is_intra_pan - check if intra pan id communication
  * @fc: frame control bytes in little-endian byteorder
  */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index ae5d0d2..f923d15 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -24,5 +24,6 @@
 	__u32 min_tx_rate;
 	__u32 max_tx_rate;
 	__u32 rss_query_en;
+	__u32 trusted;
 };
 #endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 9084292..9c9de11 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -110,7 +110,7 @@
 #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
 #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
 
-extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
+extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a4328ce..ee971f3 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -171,7 +171,7 @@
 			 __be32 local, int scope);
 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
 				    __be32 mask);
-static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+static __inline__ bool inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
 	return !((addr^ifa->ifa_address)&ifa->ifa_mask);
 }
@@ -180,15 +180,15 @@
  *	Check if a mask is acceptable.
  */
  
-static __inline__ int bad_mask(__be32 mask, __be32 addr)
+static __inline__ bool bad_mask(__be32 mask, __be32 addr)
 {
 	__u32 hmask;
 	if (addr & (mask = ~mask))
-		return 1;
+		return true;
 	hmask = ntohl(mask);
 	if (hmask & (hmask+1))
-		return 1;
-	return 0;
+		return true;
+	return false;
 }
 
 #define for_primary_ifa(in_dev)	{ struct in_ifaddr *ifa; \
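The contiguity test in bad_mask() relies on a bit trick: a well-formed
netmask is a run of 1s followed by 0s, so its host-order complement has the
form 2^k - 1, and (x & (x + 1)) == 0 holds exactly for such values. A
standalone user-space sketch of the same check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Returns true when mask_be (network byte order) is a contiguous
 * prefix mask, using the same test as bad_mask() above.
 */
static bool mask_is_contiguous(uint32_t mask_be)
{
	uint32_t h = ntohl(~mask_be);

	return (h & (h + 1)) == 0;
}

int main(void)
{
	printf("%d\n", mask_is_contiguous(htonl(0xffffff00))); /* 1: /24 */
	printf("%d\n", mask_is_contiguous(htonl(0xff00ff00))); /* 0: holes */
	return 0;
}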
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3920a19..92f7177 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -68,8 +68,8 @@
 	return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);
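The rename from iommu_iova_cache_init/destroy to iova_cache_get/put signals
reference-counted semantics: each user takes a reference on the shared iova
kmem_cache and drops it on teardown. A sketch of the implied pairing (the
example function names are hypothetical):

static int example_init(void)
{
	int ret = iova_cache_get();	/* take a cache reference */

	if (ret)
		return ret;
	/* ... allocate and use iova domains ... */
	return 0;
}

static void example_exit(void)
{
	iova_cache_put();		/* drop the reference */
}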
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index f1f32af..0ef2a97 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -264,9 +264,9 @@
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
 {
-	return inet_sk(__sk)->pinet6;
+	return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
 }
 
 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
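With the sk_fullsock() guard, inet6_sk() now returns NULL for request and
timewait minisockets, so callers that can see such sockets must check the
result. A sketch of the defensive pattern (the accessor shown is
illustrative; ipv6_pinfo does carry a tclass field):

static int example_tclass(const struct sock *sk)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);

	/* NULL means sk is not a full socket; fall back to a default */
	return np ? np->tclass : 0;
}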
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index d3ca792..f644fdb 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -161,6 +161,11 @@
 	IRQ_DOMAIN_FLAG_NONCORE		= (1 << 16),
 };
 
+static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
+{
+	return d->of_node;
+}
+
 #ifdef CONFIG_IRQ_DOMAIN
 struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
 				    irq_hw_number_t hwirq_max, int direct_max,
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b122eea..fa359c7 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -283,6 +283,13 @@
 static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
 static inline void led_trigger_event(struct led_trigger *trigger,
 				enum led_brightness event) {}
+static inline void led_trigger_blink(struct led_trigger *trigger,
+				      unsigned long *delay_on,
+				      unsigned long *delay_off) {}
+static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
+				      unsigned long *delay_on,
+				      unsigned long *delay_off,
+				      int invert) {}
 static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
 static inline void led_trigger_set(struct led_classdev *led_cdev,
 				struct led_trigger *trigger) {}
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad800e6..6452ff4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -242,7 +242,6 @@
 	 * percpu counter.
 	 */
 	struct mem_cgroup_stat_cpu __percpu *stat;
-	spinlock_t pcp_counter_lock;
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
 	struct cg_proto tcp_mem;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index baad4cb..5a8677b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -833,6 +833,7 @@
 	struct mlx4_quotas	quotas;
 	struct radix_tree_root	qp_table_tree;
 	u8			rev_id;
+	u8			port_random_macs;
 	char			board_id[MLX4_BOARD_ID_LEN];
 	int			numa_node;
 	int			oper_log_mgm_entry_size;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 41e9f3b..0b473cb 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -402,17 +402,6 @@
 	u8			rsvd[8];
 };
 
-struct mlx5_cmd_query_special_contexts_mbox_in {
-	struct mlx5_inbox_hdr	hdr;
-	u8			rsvd[8];
-};
-
-struct mlx5_cmd_query_special_contexts_mbox_out {
-	struct mlx5_outbox_hdr	hdr;
-	__be32                  dump_fill_mkey;
-	__be32                  resd_lkey;
-};
-
 struct mlx5_cmd_layout {
 	u8		type;
 	u8		rsvd0[3];
@@ -450,7 +439,8 @@
 	__be32			cmdq_addr_h;
 	__be32			cmdq_addr_l_sz;
 	__be32			cmd_dbell;
-	__be32			rsvd1[121];
+	__be32			rsvd1[120];
+	__be32			initializing;
 	struct health_buffer	health;
 	__be32			rsvd2[884];
 	__be32			health_counter;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 27b53f9..5c857f2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -391,9 +391,11 @@
 	struct health_buffer __iomem   *health;
 	__be32 __iomem		       *health_counter;
 	struct timer_list		timer;
-	struct list_head		list;
 	u32				prev;
 	int				miss_counter;
+	bool				sick;
+	struct workqueue_struct	       *wq;
+	struct work_struct		work;
 };
 
 struct mlx5_cq_table {
@@ -485,8 +487,26 @@
 	spinlock_t              ctx_lock;
 };
 
+enum mlx5_device_state {
+	MLX5_DEVICE_STATE_UP,
+	MLX5_DEVICE_STATE_INTERNAL_ERROR,
+};
+
+enum mlx5_interface_state {
+	MLX5_INTERFACE_STATE_DOWN,
+	MLX5_INTERFACE_STATE_UP,
+};
+
+enum mlx5_pci_status {
+	MLX5_PCI_STATUS_DISABLED,
+	MLX5_PCI_STATUS_ENABLED,
+};
+
 struct mlx5_core_dev {
 	struct pci_dev	       *pdev;
+	/* sync pci state */
+	struct mutex		pci_status_mutex;
+	enum mlx5_pci_status	pci_status;
 	u8			rev_id;
 	char			board_id[MLX5_BOARD_ID_LEN];
 	struct mlx5_cmd		cmd;
@@ -495,6 +515,10 @@
 	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
+	enum mlx5_device_state	state;
+	/* sync interface state */
+	struct mutex		intf_state_mutex;
+	enum mlx5_interface_state interface_state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
@@ -676,8 +700,8 @@
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
-void mlx5_health_cleanup(void);
-void  __init mlx5_health_init(void);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
@@ -731,7 +755,7 @@
 #endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
@@ -802,6 +826,11 @@
 int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
 			struct mlx5_odp_caps *odp_caps);
 
+static inline int fw_initializing(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->initializing) >> 31;
+}
+
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
 	return mkey >> 8;
@@ -845,7 +874,6 @@
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
 
 struct mlx5_profile {
 	u64	mask;
@@ -866,4 +894,8 @@
 	return 8 * (1 << param);
 }
 
+enum {
+	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+};
+
 #endif /* MLX5_DRIVER_H */
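The new initializing word in the init segment and the fw_initializing()
helper support polling for firmware readiness. An illustrative wait loop
built on it; the function name, timeout policy, and polling interval are
assumptions for the sketch, not the driver's actual implementation:

static int example_wait_fw_init(struct mlx5_core_dev *dev,
				unsigned long max_wait_jiffies)
{
	unsigned long end = jiffies + max_wait_jiffies;

	/* Poll the top bit of the initializing word until firmware
	 * clears it, giving up after the caller-supplied timeout.
	 */
	while (fw_initializing(dev)) {
		if (time_after(jiffies, end))
			return -EBUSY;
		msleep(20);	/* arbitrary polling interval */
	}
	return 0;
}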
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 91c08f6..80001de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -905,6 +905,27 @@
 #endif
 }
 
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+	page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+	return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
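The page_memcg()/set_page_memcg() pair lets generic code manipulate a page's
memcg binding without #ifdef CONFIG_MEMCG at each call site; with the option
off, both stubs compile away. A sketch of the intended usage shape (the
function name is illustrative):

static void example_transfer_memcg(struct page *from, struct page *to)
{
	/* Move the memcg association from one page to the other;
	 * with !CONFIG_MEMCG this is a no-op.
	 */
	set_page_memcg(to, page_memcg(from));
	set_page_memcg(from, NULL);
}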
diff --git a/include/linux/net.h b/include/linux/net.h
index 049d4b0..70ac5e2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,7 +24,8 @@
 #include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/once.h>
+
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -250,22 +251,8 @@
 	} while (0)
 #endif
 
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key);
-
-#define net_get_random_once(buf, nbytes)				\
-	({								\
-		bool ___ret = false;					\
-		static bool ___done = false;				\
-		static struct static_key ___once_key =			\
-			STATIC_KEY_INIT_TRUE;				\
-		if (static_key_true(&___once_key))			\
-			___ret = __net_get_random_once(buf,		\
-						       nbytes,		\
-						       &___done,	\
-						       &___once_key);	\
-		___ret;							\
-	})
+#define net_get_random_once(buf, nbytes)			\
+	get_random_once((buf), (nbytes))
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
 		   size_t num, size_t len);
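net_get_random_once() keeps its old call-site shape while now delegating to
the generic get_random_once(). A sketch of the typical pattern, lazily
seeding a per-boot hashing secret so the fast path pays only a patched-out
static branch after the first call (names are illustrative; jhash_1word()
comes from <linux/jhash.h>):

static u32 example_hash_secret;

static u32 example_hash(u32 key)
{
	/* Only the first caller ever reaches get_random_bytes() */
	net_get_random_once(&example_hash_secret,
			    sizeof(example_hash_secret));

	return jhash_1word(key, example_hash_secret);
}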
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d2ffeaf..4ac653b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -881,6 +881,7 @@
  * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
  *			  int max_tx_rate);
  * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_config)(struct net_device *dev,
  *			    int vf, struct ifla_vf_info *ivf);
  * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
@@ -1054,6 +1055,10 @@
  *	This function is used to pass protocol port error state information
  *	to the switch driver. The switch driver can react to the proto_down
  *      by doing a phys down on the associated switch port.
+ * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
+ *	This function is used to get egress tunnel information for a given skb.
+ *	This is useful for retrieving outer tunnel header parameters while
+ *	sampling packets.
  *
  */
 struct net_device_ops {
@@ -1109,6 +1114,8 @@
 						   int max_tx_rate);
 	int			(*ndo_set_vf_spoofchk)(struct net_device *dev,
 						       int vf, bool setting);
+	int			(*ndo_set_vf_trust)(struct net_device *dev,
+						    int vf, bool setting);
 	int			(*ndo_get_vf_config)(struct net_device *dev,
 						     int vf,
 						     struct ifla_vf_info *ivf);
@@ -1227,6 +1234,8 @@
 	int			(*ndo_get_iflink)(const struct net_device *dev);
 	int			(*ndo_change_proto_down)(struct net_device *dev,
 							 bool proto_down);
+	int			(*ndo_fill_metadata_dst)(struct net_device *dev,
+						       struct sk_buff *skb);
 };
 
 /**
@@ -1258,9 +1267,10 @@
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *	change when it's running
  * @IFF_MACVLAN: Macvlan device
- * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_L3MDEV_MASTER: device is an L3 master device
  * @IFF_NO_QUEUE: device can run without qdisc attached
  * @IFF_OPENVSWITCH: device is a Open vSwitch master
+ * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
@@ -1283,9 +1293,10 @@
 	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
 	IFF_IPVLAN_MASTER		= 1<<18,
 	IFF_IPVLAN_SLAVE		= 1<<19,
-	IFF_VRF_MASTER			= 1<<20,
+	IFF_L3MDEV_MASTER		= 1<<20,
 	IFF_NO_QUEUE			= 1<<21,
 	IFF_OPENVSWITCH			= 1<<22,
+	IFF_L3MDEV_SLAVE		= 1<<23,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
@@ -1308,7 +1319,7 @@
 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
-#define IFF_VRF_MASTER			IFF_VRF_MASTER
+#define IFF_L3MDEV_MASTER		IFF_L3MDEV_MASTER
 #define IFF_NO_QUEUE			IFF_NO_QUEUE
 #define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 
@@ -1427,7 +1438,6 @@
  *	@dn_ptr:	DECnet specific data
  *	@ip6_ptr:	IPv6 specific data
  *	@ax25_ptr:	AX.25 specific data
- *	@vrf_ptr:	VRF specific data
  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
  *
  *	@last_rx:	Time of last Rx
@@ -1587,6 +1597,9 @@
 #ifdef CONFIG_NET_SWITCHDEV
 	const struct switchdev_ops *switchdev_ops;
 #endif
+#ifdef CONFIG_NET_L3_MASTER_DEV
+	const struct l3mdev_ops	*l3mdev_ops;
+#endif
 
 	const struct header_ops *header_ops;
 
@@ -1646,7 +1659,6 @@
 	struct dn_dev __rcu     *dn_ptr;
 	struct inet6_dev __rcu	*ip6_ptr;
 	void			*ax25_ptr;
-	struct net_vrf_dev __rcu *vrf_ptr;
 	struct wireless_dev	*ieee80211_ptr;
 	struct wpan_dev		*ieee802154_ptr;
 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -2103,6 +2115,7 @@
 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA	0x0018
 #define NETDEV_BONDING_INFO	0x0019
+#define NETDEV_PRECHANGEUPPER	0x001A
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2203,6 +2216,7 @@
 void dev_remove_offload(struct packet_offload *po);
 
 int dev_get_iflink(const struct net_device *dev);
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
 				      unsigned short mask);
 struct net_device *dev_get_by_name(struct net *net, const char *name);
@@ -3824,9 +3838,14 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
-static inline bool netif_is_vrf(const struct net_device *dev)
+static inline bool netif_is_l3_master(const struct net_device *dev)
 {
-	return dev->priv_flags & IFF_VRF_MASTER;
+	return dev->priv_flags & IFF_L3MDEV_MASTER;
+}
+
+static inline bool netif_is_l3_slave(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_L3MDEV_SLAVE;
 }
 
 static inline bool netif_is_bridge_master(const struct net_device *dev)
@@ -3839,27 +3858,6 @@
 	return dev->priv_flags & IFF_OPENVSWITCH;
 }
 
-static inline bool netif_index_is_vrf(struct net *net, int ifindex)
-{
-	bool rc = false;
-
-#if IS_ENABLED(CONFIG_NET_VRF)
-	struct net_device *dev;
-
-	if (ifindex == 0)
-		return false;
-
-	rcu_read_lock();
-
-	dev = dev_get_by_index_rcu(net, ifindex);
-	if (dev)
-		rc = netif_is_vrf(dev);
-
-	rcu_read_unlock();
-#endif
-	return rc;
-}
-
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 987c74c..0ad5567 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -90,7 +90,6 @@
 	/* User fills in from here down. */
 	nf_hookfn		*hook;
 	struct net_device	*dev;
-	struct module		*owner;
 	void			*priv;
 	u_int8_t		pf;
 	unsigned int		hooknum;
@@ -283,7 +282,7 @@
 				 struct flowi *fl, bool strict);
 	void		(*saveroute)(const struct sk_buff *skb,
 				     struct nf_queue_entry *entry);
-	int		(*reroute)(struct sk_buff *skb,
+	int		(*reroute)(struct net *net, struct sk_buff *skb,
 				   const struct nf_queue_entry *entry);
 	int		route_key_size;
 };
@@ -347,8 +346,23 @@
 }
 
 #else /* !CONFIG_NETFILTER */
-#define NF_HOOK(pf, hook, net, sk, skb, indev, outdev, okfn) (okfn)(net, sk, skb)
-#define NF_HOOK_COND(pf, hook, net, sk, skb, indev, outdev, okfn, cond) (okfn)(net, sk, skb)
+static inline int
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
+	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
+	     bool cond)
+{
+	return okfn(net, sk, skb);
+}
+
+static inline int
+NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+	struct sk_buff *skb, struct net_device *in, struct net_device *out,
+	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+	return okfn(net, sk, skb);
+}
+
 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
 			  struct sock *sk, struct sk_buff *skb,
 			  struct net_device *indev, struct net_device *outdev,
@@ -369,24 +383,28 @@
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+#else
+static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+#endif
 
 struct nf_conn;
 enum ip_conntrack_info;
 struct nlattr;
 
-struct nfq_ct_hook {
+struct nfnl_ct_hook {
+	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
+				  enum ip_conntrack_info *ctinfo);
 	size_t (*build_size)(const struct nf_conn *ct);
-	int (*build)(struct sk_buff *skb, struct nf_conn *ct);
+	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo,
+		     u_int16_t ct_attr, u_int16_t ct_info_attr);
 	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
 	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
 			     u32 portid, u32 report);
 	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
 			   enum ip_conntrack_info ctinfo, s32 off);
 };
-extern struct nfq_ct_hook __rcu *nfq_ct_hook;
-#else
-static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
-#endif
+extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
 
 /**
  * nf_skb_duplicated - TEE target has sent a packet
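Replacing the !CONFIG_NETFILTER macros with static inline functions keeps
every argument type-checked and evaluated in both configurations, instead of
silently discarding most of them. A sketch of a caller that now compiles
identically either way (the okfn continuation is a stand-in, not a real
symbol):

static int example_okfn(struct net *net, struct sock *sk,
			struct sk_buff *skb)
{
	return 0;	/* stand-in for the real output continuation */
}

static int example_post_routing(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	/* With netfilter off, this tail-calls example_okfn() directly */
	return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
		       NULL, skb->dev, example_okfn);
}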
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index e955d47..249d1bb 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -45,11 +45,11 @@
 void nfnl_lock(__u8 subsys_id);
 void nfnl_unlock(__u8 subsys_id);
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_nfnl_is_held(__u8 subsys_id);
+bool lockdep_nfnl_is_held(__u8 subsys_id);
 #else
-static inline int lockdep_nfnl_is_held(__u8 subsys_id)
+static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
 {
-	return 1;
+	return true;
 }
 #endif /* CONFIG_PROVE_LOCKING */
 
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index 6e4591b..98c03b2 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
 __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 		       unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 7715746..47c6b04 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -17,12 +17,12 @@
 	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
 			const struct net_device *dev, int strict);
 	void (*route_input)(struct sk_buff *skb);
-	int (*fragment)(struct sock *sk, struct sk_buff *skb,
-			int (*output)(struct sock *, struct sk_buff *));
+	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+			int (*output)(struct net *, struct sock *, struct sk_buff *));
 };
 
 #ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			unsigned int dataoff, u_int8_t protocol);
 
diff --git a/include/linux/once.h b/include/linux/once.h
new file mode 100644
index 0000000..285f12c
--- /dev/null
+++ b/include/linux/once.h
@@ -0,0 +1,57 @@
+#ifndef _LINUX_ONCE_H
+#define _LINUX_ONCE_H
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+
+bool __do_once_start(bool *done, unsigned long *flags);
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags);
+
+/* Call a function exactly once. The idea of DO_ONCE() is to perform
+ * a function call such as the initialization of random seeds only
+ * once, even though DO_ONCE() can live in the fast path. After @func
+ * has been called with the passed arguments, the static key patches
+ * out the condition into a nop. DO_ONCE() guarantees type safety of
+ * its arguments!
+ *
+ * Note that the following is not equivalent ...
+ *
+ *   DO_ONCE(func, arg);
+ *   DO_ONCE(func, arg);
+ *
+ * ... to this version:
+ *
+ *   void foo(void)
+ *   {
+ *     DO_ONCE(func, arg);
+ *   }
+ *
+ *   foo();
+ *   foo();
+ *
+ * In case the one-time invocation could be triggered from multiple
+ * places, then a common helper function must be defined, so that only
+ * a single static key will be placed there!
+ */
+#define DO_ONCE(func, ...)						     \
+	({								     \
+		bool ___ret = false;					     \
+		static bool ___done = false;				     \
+		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
+		if (static_key_true(&___once_key)) {			     \
+			unsigned long ___flags;				     \
+			___ret = __do_once_start(&___done, &___flags);	     \
+			if (unlikely(___ret)) {				     \
+				func(__VA_ARGS__);			     \
+				__do_once_done(&___done, &___once_key,	     \
+					       &___flags);		     \
+			}						     \
+		}							     \
+		___ret;							     \
+	})
+
+#define get_random_once(buf, nbytes)					     \
+	DO_ONCE(get_random_bytes, (buf), (nbytes))
+
+#endif /* _LINUX_ONCE_H */
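Following the comment's advice about multiple trigger sites, a sketch of the
common-helper pattern: both call sites funnel through one function, so only
a single static key and ___done flag are generated (the helper name is
hypothetical):

static u32 example_seed;

/* Every caller goes through this helper, so DO_ONCE() is expanded,
 * and its static key emitted, exactly once.
 */
static void example_seed_init(void)
{
	DO_ONCE(get_random_bytes, &example_seed, sizeof(example_seed));
}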
diff --git a/include/linux/overflow-arith.h b/include/linux/overflow-arith.h
new file mode 100644
index 0000000..e12ccf8
--- /dev/null
+++ b/include/linux/overflow-arith.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include <linux/kernel.h>
+
+#ifdef CC_HAVE_BUILTIN_OVERFLOW
+
+#define overflow_usub __builtin_usub_overflow
+
+#else
+
+static inline bool overflow_usub(unsigned int a, unsigned int b,
+				 unsigned int *res)
+{
+	*res = a - b;
+	return *res > a;
+}
+
+#endif
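Paired with the CC_HAVE_BUILTIN_OVERFLOW gate added to compiler-gcc.h above,
overflow_usub() lets callers fold the underflow check into the subtraction
itself. A sketch of the intended call-site shape (the function and parameter
names are illustrative):

static int example_payload_len(unsigned int mtu, unsigned int hdr_len,
			       unsigned int *payload)
{
	/* A true return means hdr_len > mtu, i.e. the unsigned
	 * subtraction wrapped around and *payload is not usable.
	 */
	if (overflow_usub(mtu, hdr_len, payload))
		return -EINVAL;
	return 0;
}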
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 4a4e3a0..05fde31 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -213,7 +213,9 @@
 void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum);
 int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
 
 
 #define PHY_INTERRUPT_DISABLED	0x0
@@ -798,6 +800,7 @@
 int phy_start_interrupts(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
 void phy_device_free(struct phy_device *phydev);
+int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
 
 int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
 		       int (*run)(struct phy_device *));
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 527a85c..c121ddf 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -74,11 +74,6 @@
 	struct serial_rs485	rs485;		/* rs485 settings */
 };
 
-/* CAN */
-struct at91_can_data {
-	void (*transceiver_switch)(int on);
-};
-
 /* FIXME: this needs a better location, but gets stuff building again */
 extern int at91_suspend_entering_slow_clock(void);
 
diff --git a/include/linux/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
similarity index 100%
rename from include/linux/mdio-gpio.h
rename to include/linux/platform_data/mdio-gpio.h
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
index ac91707..a6f9d63 100644
--- a/include/linux/platform_data/nfcmrvl.h
+++ b/include/linux/platform_data/nfcmrvl.h
@@ -35,6 +35,14 @@
 	unsigned int flow_control;
 	/* Tell if firmware supports break control for power management */
 	unsigned int break_control;
+
+
+	/*
+	 * I2C specific
+	 */
+
+	unsigned int irq;
+	unsigned int irq_polarity;
 };
 
 #endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/st-nci.h b/include/linux/platform_data/st-nci.h
index d9d400a..f6494b3 100644
--- a/include/linux/platform_data/st-nci.h
+++ b/include/linux/platform_data/st-nci.h
@@ -24,6 +24,8 @@
 struct st_nci_nfc_platform_data {
 	unsigned int gpio_reset;
 	unsigned int irq_polarity;
+	bool is_ese_present;
+	bool is_uicc_present;
 };
 
 #endif /* _ST_NCI_H_ */
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
new file mode 100644
index 0000000..6a43476
--- /dev/null
+++ b/include/linux/qed/common_hsi.h
@@ -0,0 +1,607 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define FW_MAJOR_VERSION	8
+#define FW_MINOR_VERSION	4
+#define FW_REVISION_VERSION	2
+#define FW_ENGINEERING_VERSION	0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2	(4)
+#define MAX_NUM_PORTS_BB	(2)
+#define MAX_NUM_PORTS		(MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2	(16)
+#define MAX_NUM_PFS_BB	(8)
+#define MAX_NUM_PFS	(MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2	(192)
+#define MAX_NUM_VFS_BB	(120)
+#define MAX_NUM_VFS	(MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB	(MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS	(MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB	(MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER	(MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2	(208)
+#define MAX_NUM_VPORTS_BB	(160)
+#define MAX_NUM_VPORTS		(MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2	(320)
+#define MAX_NUM_L2_QUEUES_BB	(256)
+#define MAX_NUM_L2_QUEUES	(MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2	(4)
+#define NUM_OF_PHYS_TCS		(8)
+
+#define NUM_TCS_4PORT_K2	(NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS		(NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC			(NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO		(8)
+
+#define MAX_NUM_VOQS_K2		(NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB		(NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS		(MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS		(NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES	(8)
+#define NUM_OF_LCIDS		(320)
+#define NUM_OF_LTIDS		(320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS  */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY			0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2  0
+#define DQ_XCM_AGG_VAL_SEL_WORD3  1
+#define DQ_XCM_AGG_VAL_SEL_WORD4  2
+#define DQ_XCM_AGG_VAL_SEL_WORD5  3
+#define DQ_XCM_AGG_VAL_SEL_REG3   4
+#define DQ_XCM_AGG_VAL_SEL_REG4   5
+#define DQ_XCM_AGG_VAL_SEL_REG5   6
+#define DQ_XCM_AGG_VAL_SEL_REG6   7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14  0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15  1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12   2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13   3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18   4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19   5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22   6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23   7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD		(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD		(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD	(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD	(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD	(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD	(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD		(1 << \
+					DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS  */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2	512
+#define MAX_QM_TX_QUEUES_BB	448
+#define MAX_QM_TX_QUEUES	MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB	64
+#define MAX_QM_OTHER_QUEUES_K2	128
+#define MAX_QM_OTHER_QUEUES	MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE	8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE	0x200
+
+/* QM registers data */
+#define QM_LINE_CRD_REG_WIDTH		16
+#define QM_LINE_CRD_REG_SIGN_BIT	(1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH		24
+#define QM_BYTE_CRD_REG_SIGN_BIT	(1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH		32
+#define QM_WFQ_CRD_REG_SIGN_BIT		(1 << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH		32
+#define QM_RL_CRD_REG_SIGN_BIT		(1 << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX  0
+#define CAU_FSM_ETH_TX  1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB    12
+
+#define CAU_HC_STOPPED_STATE	3
+#define CAU_HC_DISABLE_STATE	4
+#define CAU_HC_ENABLE_STATE	0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2	(368)
+#define MAX_SB_PER_PATH_BB	(288)
+#define MAX_TOT_SB_PER_PATH \
+	MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD	129
+#define MAX_SB_PER_PF_SIMD	64
+#define MAX_SB_PER_VF		64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE			0x0000
+
+#define IGU_MEM_MSIX_BASE		0x0000
+#define IGU_MEM_MSIX_UPPER		0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER	0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE		0x0200
+#define IGU_MEM_PBA_MSIX_UPPER		0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER	0x03ff
+
+#define IGU_CMD_INT_ACK_BASE		0x0400
+#define IGU_CMD_INT_ACK_UPPER		(IGU_CMD_INT_ACK_BASE +	\
+					 MAX_TOT_SB_PER_PATH -	\
+					 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER	0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER	0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER	0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER	0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER		0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER	0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER	0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE			0x0600
+#define IGU_CMD_PROD_UPD_UPPER			(IGU_CMD_PROD_UPD_BASE +\
+						 MAX_TOT_SB_PER_PATH - \
+						 1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER		0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS		12
+#define PXP_PER_PF_ENTRY_SIZE		8
+#define PXP_NUM_GLOBAL_WINDOWS		243
+#define PXP_GLOBAL_ENTRY_SIZE		4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH	4
+#define PXP_PF_WINDOW_ADMIN_START	0
+#define PXP_PF_WINDOW_ADMIN_LENGTH	0x1000
+#define PXP_PF_WINDOW_ADMIN_END		(PXP_PF_WINDOW_ADMIN_START + \
+					 PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START	0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH	(PXP_NUM_PF_WINDOWS * \
+						 PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END	(PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+					 PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START	0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH	(PXP_NUM_GLOBAL_WINDOWS * \
+						 PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+		(PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+		 PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR	0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR	0xf4
+#define PXP_PF_ME_OPAQUE_ADDR		0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR		0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START	0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM		PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE	0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+	(PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+	 PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+	(PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+	 PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+	(PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM		PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE	0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+	(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+	 PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN	12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER	1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS  */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE */
+struct async_data {
+	__le32	cid;
+	__le16	itid;
+	u8	error_code;
+	u8	fw_debug_param;
+};
+
+struct regpair {
+	__le32	lo;
+	__le32	hi;
+};
+
+/* Event Data Union */
+union event_ring_data {
+	u8				bytes[8];
+	struct async_data		async_info;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+	u8			protocol_id;
+	u8			opcode;
+	__le16			reserved0;
+	__le16			echo;
+	u8			fw_return_code;
+	u8			flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+	union event_ring_data	data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+	SF,
+	MF_OVLAN,
+	MF_NPAR,
+	MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+	PROTOCOLID_RESERVED1,
+	PROTOCOLID_RESERVED2,
+	PROTOCOLID_RESERVED3,
+	PROTOCOLID_CORE,
+	PROTOCOLID_ETH,
+	PROTOCOLID_RESERVED4,
+	PROTOCOLID_RESERVED5,
+	PROTOCOLID_PREROCE,
+	PROTOCOLID_COMMON,
+	PROTOCOLID_RESERVED6,
+	MAX_PROTOCOL_TYPE
+};
+
+/* status block structure */
+struct cau_pi_entry {
+	u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+	u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+	u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
+};
+
+/* core doorbell data */
+struct core_db_data {
+	u8 params;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT        0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+	u8	agg_flags;
+	__le16	spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+	DB_AGG_CMD_NOP,
+	DB_AGG_CMD_SET,
+	DB_AGG_CMD_ADD,
+	DB_AGG_CMD_MAX,
+	MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+	DB_DEST_XCM,
+	DB_DEST_UCM,
+	DB_DEST_TCM,
+	DB_NUM_DESTINATIONS,
+	MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+	__le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+	IGU_INT_ENABLE	= 0,
+	IGU_INT_DISABLE = 1,
+	IGU_INT_NOP	= 2,
+	IGU_INT_NOP2	= 3,
+	MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+	u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
+	u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+	IGU_SEG_ACCESS_REG	= 0,
+	IGU_SEG_ACCESS_ATTN	= 1,
+	MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+	__le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+	__le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
+};
+
+struct pxp_pretend_concrete_fid {
+	__le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+};
+
+union pxp_pretend_fid {
+	struct pxp_pretend_concrete_fid concrete_fid;
+	__le16				opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+	union pxp_pretend_fid	fid;
+	__le16			control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+	__le32			offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
+#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+	struct pxp_pretend_cmd	pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+	RSS_HASH_TYPE_DEFAULT	= 0,
+	RSS_HASH_TYPE_IPV4	= 1,
+	RSS_HASH_TYPE_TCP_IPV4	= 2,
+	RSS_HASH_TYPE_IPV6	= 3,
+	RSS_HASH_TYPE_TCP_IPV6	= 4,
+	RSS_HASH_TYPE_UDP_IPV4	= 5,
+	RSS_HASH_TYPE_UDP_IPV6	= 6,
+	MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+	__le16	pi_array[PIS_PER_SB];
+	__le32	sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+	__le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+};
+
+#endif /* __COMMON_HSI__ */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
new file mode 100644
index 0000000..320b337
--- /dev/null
+++ b/include/linux/qed/eth_common.h
@@ -0,0 +1,279 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE                 64
+
+#define ETH_MAX_RAMROD_PER_CON                          8
+#define ETH_TX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES                       4096
+#define ETH_RX_SGE_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS                        2
+#define ETH_RX_NUM_NEXT_PAGE_SGES                       2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
+#define ETH_TX_MAX_LSO_HDR_NBD                                          4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                                      3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT       3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT            2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE          2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                    510
+
+#define ETH_NUM_STATISTIC_COUNTERS                      MAX_NUM_VPORTS
+
+#define ETH_REG_CQE_PBL_SIZE                3
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS                                     512
+#define ETH_NUM_VLAN_FILTERS                            512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED     0
+#define ETH_MULTICAST_MAC_BINS                          256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS          (ETH_MULTICAST_MAC_BINS / 32)
+
+/*  ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT                          10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM           128
+#define ETH_RSS_KEY_SIZE_REGS                       10
+#define ETH_RSS_ENGINE_NUM_K2               207
+#define ETH_RSS_ENGINE_NUM_BB               127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM              64
+#define ETH_TPA_CQE_START_SGL_SIZE        3
+#define ETH_TPA_CQE_CONT_SGL_SIZE         6
+#define ETH_TPA_CQE_END_SGL_SIZE          4
+
+/* Queue Zone sizes */
+#define TSTORM_QZONE_SIZE    0
+#define MSTORM_QZONE_SIZE    sizeof(struct mstorm_eth_queue_zone)
+#define USTORM_QZONE_SIZE    sizeof(struct ustorm_eth_queue_zone)
+#define XSTORM_QZONE_SIZE    0
+#define YSTORM_QZONE_SIZE    sizeof(struct ystorm_eth_queue_zone)
+#define PSTORM_QZONE_SIZE    0
+
+/* Interrupt coalescing TimeSet */
+struct coalescing_timeset {
+	u8	timeset;
+	u8	valid;
+};
+
+struct eth_tx_1st_bd_flags {
+	u8 bitfields;
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         2
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  3
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             4
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        5
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+};
+
+/* The parsing information data for the first tx bd of a given packet. */
+struct eth_tx_data_1st_bd {
+	__le16				vlan;
+	u8				nbds;
+	struct eth_tx_1st_bd_flags	bd_flags;
+	__le16				fw_use_only;
+};
+
+/* The parsing information data for the second tx bd of a given packet. */
+struct eth_tx_data_2nd_bd {
+	__le16	tunn_ip_size;
+	__le16	bitfields;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+	__le16	bitfields2;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                8
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          10
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 11
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            12
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   13
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      14
+#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK                 0x1
+#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT                15
+};
+
+/* Regular ETH Rx FP CQE. */
+struct eth_fast_path_rx_reg_cqe {
+	u8	type;
+	u8	bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+	__le16				pkt_len;
+	struct parsing_and_err_flags	pars_flags;
+	__le16				vlan_tag;
+	__le32				rss_hash;
+	__le16				len_on_bd;
+	u8				placement_offset;
+	u8				reserved;
+	__le16				pbl[ETH_REG_CQE_PBL_SIZE];
+	u8				reserved1[10];
+};
+
+/* The L4 pseudo checksum mode for Ethernet */
+enum eth_l4_pseudo_checksum_mode {
+	ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+	ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
+	MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+struct eth_rx_bd {
+	struct regpair addr;
+};
+
+/* regular ETH Rx SP CQE */
+struct eth_slow_path_rx_cqe {
+	u8	type;
+	u8	ramrod_cmd_id;
+	u8	error_flag;
+	u8	reserved[27];
+	__le16	echo;
+};
+
+/* union for all ETH Rx CQE types */
+union eth_rx_cqe {
+	struct eth_fast_path_rx_reg_cqe		fast_path_regular;
+	struct eth_slow_path_rx_cqe		slow_path;
+};
+
+/* ETH Rx CQE type */
+enum eth_rx_cqe_type {
+	ETH_RX_CQE_TYPE_UNUSED,
+	ETH_RX_CQE_TYPE_REGULAR,
+	ETH_RX_CQE_TYPE_SLOW_PATH,
+	MAX_ETH_RX_CQE_TYPE
+};
+
+/* ETH Rx producers data */
+struct eth_rx_prod_data {
+	__le16	bd_prod;
+	__le16	sge_prod;
+	__le16	cqe_prod;
+	__le16	reserved;
+};
+
+/* The first tx bd of a given packet */
+struct eth_tx_1st_bd {
+	struct regpair			addr;
+	__le16				nbytes;
+	struct eth_tx_data_1st_bd	data;
+};
+
+/* The second tx bd of a given packet */
+struct eth_tx_2nd_bd {
+	struct regpair			addr;
+	__le16				nbytes;
+	struct eth_tx_data_2nd_bd	data;
+};
+
+/* The parsing information data for the third tx bd of a given packet. */
+struct eth_tx_data_3rd_bd {
+	__le16	lso_mss;
+	u8	bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
+	u8	reserved0[3];
+};
+
+/* The third tx bd of a given packet */
+struct eth_tx_3rd_bd {
+	struct regpair			addr;
+	__le16				nbytes;
+	struct eth_tx_data_3rd_bd	data;
+};
+
+/* The common non-special TX BD ring element */
+struct eth_tx_bd {
+	struct regpair	addr;
+	__le16		nbytes;
+	__le16		reserved0;
+	__le32		reserved1;
+};
+
+union eth_tx_bd_types {
+	struct eth_tx_1st_bd	first_bd;
+	struct eth_tx_2nd_bd	second_bd;
+	struct eth_tx_3rd_bd	third_bd;
+	struct eth_tx_bd	reg_bd;
+};
+
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+	struct eth_rx_prod_data rx_producers;
+	__le32			reserved[2];
+};
+
+/* Ustorm Queue Zone */
+struct ustorm_eth_queue_zone {
+	struct coalescing_timeset	int_coalescing_timeset;
+	__le16				reserved[3];
+};
+
+/* Ystorm Queue Zone */
+struct ystorm_eth_queue_zone {
+	struct coalescing_timeset	int_coalescing_timeset;
+	__le16				reserved[3];
+};
+
+/* ETH doorbell data */
+struct eth_db_data {
+	u8 params;
+#define ETH_DB_DATA_DEST_MASK         0x3
+#define ETH_DB_DATA_DEST_SHIFT        0
+#define ETH_DB_DATA_AGG_CMD_MASK      0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT     2
+#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
+#define ETH_DB_DATA_RESERVED_MASK     0x1
+#define ETH_DB_DATA_RESERVED_SHIFT    5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+	u8	agg_flags;
+	__le16	bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
new file mode 100644
index 0000000..b920c36
--- /dev/null
+++ b/include/linux/qed/qed_chain.h
@@ -0,0 +1,539 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_CHAIN_H
+#define _QED_CHAIN_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+
+/* dma_addr_t manip */
+#define DMA_LO_LE(x)            cpu_to_le32(lower_32_bits(x))
+#define DMA_HI_LE(x)            cpu_to_le32(upper_32_bits(x))
+
+#define HILO_GEN(hi, lo, type)  ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo)        HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64)
+#define HILO_DMA_REGPAIR(regpair)       (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair)        (HILO_64(regpair.hi, regpair.lo))
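To make these helpers concrete, a minimal sketch — not part of the qed patch itself, both function names are hypothetical — of writing a dma_addr_t into a little-endian regpair and reading it back:

static void example_regpair_set(struct regpair *rp, dma_addr_t addr)
{
	rp->lo = DMA_LO_LE(addr);	/* lower 32 bits, cpu -> LE */
	rp->hi = DMA_HI_LE(addr);	/* upper 32 bits, cpu -> LE */
}

static u64 example_regpair_get(struct regpair rp)
{
	/* swap each half back to cpu order and recombine into a u64 */
	return HILO_64_REGPAIR(rp);
}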
+
+enum qed_chain_mode {
+	/* Each Page contains a next pointer at its end */
+	QED_CHAIN_MODE_NEXT_PTR,
+
+	/* Chain is a single page; a next pointer is not required */
+	QED_CHAIN_MODE_SINGLE,
+
+	/* Page pointers are located in a side list */
+	QED_CHAIN_MODE_PBL,
+};
+
+enum qed_chain_use_mode {
+	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
+	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
+	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
+};
+
+struct qed_chain_next {
+	struct regpair	next_phys;
+	void		*next_virt;
+};
+
+struct qed_chain_pbl {
+	dma_addr_t	p_phys_table;
+	void		*p_virt_table;
+	u16		prod_page_idx;
+	u16		cons_page_idx;
+};
+
+struct qed_chain {
+	void			*p_virt_addr;
+	dma_addr_t		p_phys_addr;
+	void			*p_prod_elem;
+	void			*p_cons_elem;
+	u16			page_cnt;
+	enum qed_chain_mode	mode;
+	enum qed_chain_use_mode intended_use; /* used to produce/consume */
+	u16			capacity; /* number of _usable_ elements */
+	u16			size; /* number of elements */
+	u16			prod_idx;
+	u16			cons_idx;
+	u16			elem_per_page;
+	u16			elem_per_page_mask;
+	u16			elem_unusable;
+	u16			usable_per_page;
+	u16			elem_size;
+	u16			next_page_mask;
+	struct qed_chain_pbl	pbl;
+};
+
+#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
+#define QED_CHAIN_PAGE_SIZE             (0x1000)
+#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
+	((mode == QED_CHAIN_MODE_NEXT_PTR) ?	     \
+	 (1 + ((sizeof(struct qed_chain_next) - 1) / \
+	       (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+	((u32)(ELEMS_PER_PAGE(elem_size) -     \
+	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
+	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
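As a worked example, assuming a 64-bit build where sizeof(struct qed_chain_next) is 16: with 8-byte elements in QED_CHAIN_MODE_NEXT_PTR mode, ELEMS_PER_PAGE is 0x1000 / 8 = 512, each page sacrifices 1 + (16 - 1) / 8 = 2 elements for the next pointer, leaving 510 usable, so a 1000-element chain needs DIV_ROUND_UP(1000, 510) = 2 pages.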
+
+/* Accessors */
+static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+{
+	return p_chain->prod_idx;
+}
+
+static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+{
+	return p_chain->cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+{
+	u16 used;
+
+	/* we don't need to truncate upon assignment, as we assign u32->u16 */
+	used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) -
+		(u32)p_chain->cons_idx;
+	if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+		used -= (used / p_chain->elem_per_page);
+
+	return p_chain->capacity - used;
+}
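The unsigned arithmetic above is wraparound-safe: with prod_idx = 5 and cons_idx = 0xfffe (the producer has wrapped), used = (0x10000 + 5) - 0xfffe = 7; when no wrap has occurred, the added 0x10000 is simply truncated away by the u32-to-u16 assignment.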
+
+static inline u8 qed_chain_is_full(struct qed_chain *p_chain)
+{
+	return qed_chain_get_elem_left(p_chain) == p_chain->capacity;
+}
+
+static inline u8 qed_chain_is_empty(struct qed_chain *p_chain)
+{
+	return qed_chain_get_elem_left(p_chain) == 0;
+}
+
+static inline u16 qed_chain_get_elem_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->elem_per_page;
+}
+
+static inline u16 qed_chain_get_usable_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->usable_per_page;
+}
+
+static inline u16 qed_chain_get_unusable_per_page(
+	struct qed_chain *p_chain)
+{
+	return p_chain->elem_unusable;
+}
+
+static inline u16 qed_chain_get_size(struct qed_chain *p_chain)
+{
+	return p_chain->size;
+}
+
+static inline dma_addr_t
+qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+{
+	return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief qed_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static inline void
+qed_chain_advance_page(struct qed_chain *p_chain,
+		       void **p_next_elem,
+		       u16 *idx_to_inc,
+		       u16 *page_to_inc)
+{
+	switch (p_chain->mode) {
+	case QED_CHAIN_MODE_NEXT_PTR:
+	{
+		struct qed_chain_next *p_next = *p_next_elem;
+		*p_next_elem = p_next->next_virt;
+		*idx_to_inc += p_chain->elem_unusable;
+		break;
+	}
+	case QED_CHAIN_MODE_SINGLE:
+		*p_next_elem = p_chain->p_virt_addr;
+		break;
+
+	case QED_CHAIN_MODE_PBL:
+		/* Pages are assumed to be sequential; the next element only
+		 * needs to change when wrapping from the last page back to
+		 * the first.
+		 */
+		if (++(*page_to_inc) == p_chain->page_cnt) {
+			*page_to_inc = 0;
+			*p_next_elem = p_chain->p_virt_addr;
+		}
+	}
+}
+
+#define is_unusable_idx(p, idx)	\
+	(((p)->idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx) \
+	((((p)->idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_and_skip(p, idx)				\
+	do {						\
+		if (is_unusable_idx(p, idx)) {		\
+			(p)->idx += (p)->elem_unusable;	\
+		}					\
+	} while (0)
+
+/**
+ * @brief qed_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previously produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static inline void
+qed_chain_return_multi_produced(struct qed_chain *p_chain,
+				u16 num)
+{
+	p_chain->cons_idx += num;
+	test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previously produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+static inline void qed_chain_return_produced(struct qed_chain *p_chain)
+{
+	p_chain->cons_idx++;
+	test_and_skip(p_chain, cons_idx);
+}
+
+/**
+ * @brief qed_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It is the driver's
+ * responsibility to validate that the chain has room for a new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static inline void *qed_chain_produce(struct qed_chain *p_chain)
+{
+	void *ret = NULL;
+
+	if ((p_chain->prod_idx & p_chain->elem_per_page_mask) ==
+	    p_chain->next_page_mask) {
+		qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+				       &p_chain->prod_idx,
+				       &p_chain->pbl.prod_page_idx);
+	}
+
+	ret = p_chain->p_prod_elem;
+	p_chain->prod_idx++;
+	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
+					p_chain->elem_size);
+
+	return ret;
+}
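A usage sketch of the produce-side contract (hypothetical function, not part of the patch; assumes the element type is a plain regpair): since qed_chain_produce() itself performs no bounds check, the caller verifies room first.

static int example_produce_one(struct qed_chain *p_chain, dma_addr_t buf)
{
	struct regpair *elem;

	if (!qed_chain_get_elem_left(p_chain))
		return -EBUSY;	/* no room left in the chain */

	elem = qed_chain_produce(p_chain);
	elem->lo = DMA_LO_LE(buf);
	elem->hi = DMA_HI_LE(buf);
	return 0;
}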
+
+/**
+ * @brief qed_chain_get_capacity -
+ *
+ * Get the maximum number of usable elements (BDs) in the chain
+ *
+ * @param p_chain
+ *
+ * @return u16, the chain's capacity
+ */
+static inline u16 qed_chain_get_capacity(struct qed_chain *p_chain)
+{
+	return p_chain->capacity;
+}
+
+/**
+ * @brief qed_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * increments the producer so it can be written to the FW.
+ *
+ * @param p_chain
+ */
+static inline void
+qed_chain_recycle_consumed(struct qed_chain *p_chain)
+{
+	test_and_skip(p_chain, prod_idx);
+	p_chain->prod_idx++;
+}
+
+/**
+ * @brief qed_chain_consume -
+ *
+ * A chain in which the driver consumes data written by a different source
+ * (e.g., FW) should use this to access the passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static inline void *qed_chain_consume(struct qed_chain *p_chain)
+{
+	void *ret = NULL;
+
+	if ((p_chain->cons_idx & p_chain->elem_per_page_mask) ==
+	    p_chain->next_page_mask) {
+		qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+				       &p_chain->cons_idx,
+				       &p_chain->pbl.cons_page_idx);
+	}
+
+	ret = p_chain->p_cons_elem;
+	p_chain->cons_idx++;
+	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
+					p_chain->elem_size);
+
+	return ret;
+}
+
+/**
+ * @brief qed_chain_reset - Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static inline void qed_chain_reset(struct qed_chain *p_chain)
+{
+	int i;
+
+	p_chain->prod_idx	= 0;
+	p_chain->cons_idx	= 0;
+	p_chain->p_cons_elem	= p_chain->p_virt_addr;
+	p_chain->p_prod_elem	= p_chain->p_virt_addr;
+
+	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+		p_chain->pbl.prod_page_idx	= p_chain->page_cnt - 1;
+		p_chain->pbl.cons_page_idx	= p_chain->page_cnt - 1;
+	}
+
+	switch (p_chain->intended_use) {
+	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
+	case QED_CHAIN_USE_TO_PRODUCE:
+		/* Do nothing */
+		break;
+
+	case QED_CHAIN_USE_TO_CONSUME:
+		/* produce empty elements */
+		for (i = 0; i < p_chain->capacity; i++)
+			qed_chain_recycle_consumed(p_chain);
+		break;
+	}
+}
+
+/**
+ * @brief qed_chain_init - Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param p_virt_addr	virtual address of allocated buffer's beginning
+ * @param p_phys_addr	physical address of allocated buffer's beginning
+ * @param page_cnt	number of pages in the allocated buffer
+ * @param elem_size	size of each element in the chain
+ * @param intended_use
+ * @param mode
+ */
+static inline void qed_chain_init(struct qed_chain *p_chain,
+				  void *p_virt_addr,
+				  dma_addr_t p_phys_addr,
+				  u16 page_cnt,
+				  u8 elem_size,
+				  enum qed_chain_use_mode intended_use,
+				  enum qed_chain_mode mode)
+{
+	/* chain fixed parameters */
+	p_chain->p_virt_addr	= p_virt_addr;
+	p_chain->p_phys_addr	= p_phys_addr;
+	p_chain->elem_size	= elem_size;
+	p_chain->page_cnt	= page_cnt;
+	p_chain->mode		= mode;
+
+	p_chain->intended_use		= intended_use;
+	p_chain->elem_per_page		= ELEMS_PER_PAGE(elem_size);
+	p_chain->usable_per_page =
+		USABLE_ELEMS_PER_PAGE(elem_size, mode);
+	p_chain->capacity		= p_chain->usable_per_page * page_cnt;
+	p_chain->size			= p_chain->elem_per_page * page_cnt;
+	p_chain->elem_per_page_mask	= p_chain->elem_per_page - 1;
+
+	p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+
+	p_chain->next_page_mask = (p_chain->usable_per_page &
+				   p_chain->elem_per_page_mask);
+
+	if (mode == QED_CHAIN_MODE_NEXT_PTR) {
+		struct qed_chain_next	*p_next;
+		u16			i;
+
+		for (i = 0; i < page_cnt - 1; i++) {
+			/* Increment mem_phy to the next page. */
+			p_phys_addr += QED_CHAIN_PAGE_SIZE;
+
+			/* Initialize the physical address of the next page. */
+			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+							   elem_size *
+							   p_chain->
+							   usable_per_page);
+
+			p_next->next_phys.lo	= DMA_LO_LE(p_phys_addr);
+			p_next->next_phys.hi	= DMA_HI_LE(p_phys_addr);
+
+			/* Initialize the virtual address of the next page. */
+			p_next->next_virt = (void *)((u8 *)p_virt_addr +
+						     QED_CHAIN_PAGE_SIZE);
+
+			/* Move to the next page. */
+			p_virt_addr = p_next->next_virt;
+		}
+
+		/* Last page's next should point to beginning of the chain */
+		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
+						   elem_size *
+						   p_chain->usable_per_page);
+
+		p_next->next_phys.lo	= DMA_LO_LE(p_chain->p_phys_addr);
+		p_next->next_phys.hi	= DMA_HI_LE(p_chain->p_phys_addr);
+		p_next->next_virt	= p_chain->p_virt_addr;
+	}
+	qed_chain_reset(p_chain);
+}
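For orientation, a rough sketch of feeding this routine by hand — drivers are expected to go through the chain_alloc op in struct qed_common_ops instead, and the function below is hypothetical (it also assumes <linux/dma-mapping.h>):

static int example_chain_setup(struct device *dev, struct qed_chain *p_chain)
{
	u16 page_cnt = QED_CHAIN_PAGE_CNT(256, sizeof(struct regpair),
					  QED_CHAIN_MODE_NEXT_PTR);
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(dev, (size_t)page_cnt * QED_CHAIN_PAGE_SIZE,
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	qed_chain_init(p_chain, virt, phys, page_cnt, sizeof(struct regpair),
		       QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_NEXT_PTR);
	return 0;
}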
+
+/**
+ * @brief qed_chain_pbl_init - Initializes a basic pbl chain
+ *        struct
+ * @param p_chain
+ * @param p_virt_addr	virtual address of allocated buffer's beginning
+ * @param p_phys_addr	physical address of allocated buffer's beginning
+ * @param page_cnt	number of pages in the allocated buffer
+ * @param elem_size	size of each element in the chain
+ * @param use_mode
+ * @param p_phys_pbl	pointer to a pre-allocated side table
+ *                      which will hold physical page addresses.
+ * @param p_virt_pbl	pointer to a pre-allocated side table
+ *                      which will hold virtual page addresses.
+ */
+static inline void
+qed_chain_pbl_init(struct qed_chain *p_chain,
+		   void *p_virt_addr,
+		   dma_addr_t p_phys_addr,
+		   u16 page_cnt,
+		   u8 elem_size,
+		   enum qed_chain_use_mode use_mode,
+		   dma_addr_t p_phys_pbl,
+		   dma_addr_t *p_virt_pbl)
+{
+	dma_addr_t *p_pbl_dma = p_virt_pbl;
+	int i;
+
+	qed_chain_init(p_chain, p_virt_addr, p_phys_addr, page_cnt,
+		       elem_size, use_mode, QED_CHAIN_MODE_PBL);
+
+	p_chain->pbl.p_phys_table = p_phys_pbl;
+	p_chain->pbl.p_virt_table = p_virt_pbl;
+
+	/* Fill the PBL with physical addresses*/
+	for (i = 0; i < page_cnt; i++) {
+		*p_pbl_dma = p_phys_addr;
+		p_phys_addr += QED_CHAIN_PAGE_SIZE;
+		p_pbl_dma++;
+	}
+}
+
+/**
+ * @brief qed_chain_set_prod - sets the prod to the given
+ *        value
+ *
+ * @param p_chain
+ * @param prod_idx
+ * @param p_prod_elem
+ */
+static inline void qed_chain_set_prod(struct qed_chain *p_chain,
+				      u16 prod_idx,
+				      void *p_prod_elem)
+{
+	p_chain->prod_idx	= prod_idx;
+	p_chain->p_prod_elem	= p_prod_elem;
+}
+
+/**
+ * @brief qed_chain_sge_get_elem -
+ *
+ * get a pointer to an element represented by absolute idx
+ *
+ * @param p_chain
+ * @assumption p_chain->size is a power of 2
+ *
+ * @return void*, a pointer to the element at idx, or NULL if out of range
+ */
+static inline void *qed_chain_sge_get_elem(struct qed_chain *p_chain,
+					   u16 idx)
+{
+	void *ret = NULL;
+
+	if (idx >= p_chain->size)
+		return NULL;
+
+	ret = (u8 *)p_chain->p_virt_addr + p_chain->elem_size * idx;
+
+	return ret;
+}
+
+/**
+ * @brief qed_chain_sge_inc_cons_prod
+ *
+ * For SGE chains the producer isn't advanced serially; the ring
+ * is expected to be full at all times. Once elements are
+ * consumed, they are immediately produced.
+ *
+ * @param p_chain
+ * @param cnt
+ *
+ * @return inline void
+ */
+static inline void
+qed_chain_sge_inc_cons_prod(struct qed_chain *p_chain,
+			    u16 cnt)
+{
+	p_chain->prod_idx += cnt;
+	p_chain->cons_idx += cnt;
+}
+
+#endif
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
new file mode 100644
index 0000000..81ab178
--- /dev/null
+++ b/include/linux/qed/qed_eth_if.h
@@ -0,0 +1,165 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_ETH_IF_H
+#define _QED_ETH_IF_H
+
+#include <linux/list.h>
+#include <linux/if_link.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_dev_eth_info {
+	struct qed_dev_info common;
+
+	u8	num_queues;
+	u8	num_tc;
+
+	u8	port_mac[ETH_ALEN];
+	u8	num_vlan_filters;
+};
+
+struct qed_update_vport_rss_params {
+	u16	rss_ind_table[128];
+	u32	rss_key[10];
+};
+
+struct qed_update_vport_params {
+	u8 vport_id;
+	u8 update_vport_active_flg;
+	u8 vport_active_flg;
+	u8 update_rss_flg;
+	struct qed_update_vport_rss_params rss_params;
+};
+
+struct qed_stop_rxq_params {
+	u8 rss_id;
+	u8 rx_queue_id;
+	u8 vport_id;
+	bool eq_completion_only;
+};
+
+struct qed_stop_txq_params {
+	u8 rss_id;
+	u8 tx_queue_id;
+};
+
+enum qed_filter_rx_mode_type {
+	QED_FILTER_RX_MODE_TYPE_REGULAR,
+	QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+	QED_FILTER_RX_MODE_TYPE_PROMISC,
+};
+
+enum qed_filter_xcast_params_type {
+	QED_FILTER_XCAST_TYPE_ADD,
+	QED_FILTER_XCAST_TYPE_DEL,
+	QED_FILTER_XCAST_TYPE_REPLACE,
+};
+
+struct qed_filter_ucast_params {
+	enum qed_filter_xcast_params_type type;
+	u8 vlan_valid;
+	u16 vlan;
+	u8 mac_valid;
+	unsigned char mac[ETH_ALEN];
+};
+
+struct qed_filter_mcast_params {
+	enum qed_filter_xcast_params_type type;
+	u8 num;
+	unsigned char mac[64][ETH_ALEN];
+};
+
+union qed_filter_type_params {
+	enum qed_filter_rx_mode_type accept_flags;
+	struct qed_filter_ucast_params ucast;
+	struct qed_filter_mcast_params mcast;
+};
+
+enum qed_filter_type {
+	QED_FILTER_TYPE_UCAST,
+	QED_FILTER_TYPE_MCAST,
+	QED_FILTER_TYPE_RX_MODE,
+	QED_MAX_FILTER_TYPES,
+};
+
+struct qed_filter_params {
+	enum qed_filter_type type;
+	union qed_filter_type_params filter;
+};
+
+struct qed_queue_start_common_params {
+	u8 rss_id;
+	u8 queue_id;
+	u8 vport_id;
+	u16 sb;
+	u16 sb_idx;
+};
+
+struct qed_eth_cb_ops {
+	struct qed_common_cb_ops common;
+};
+
+struct qed_eth_ops {
+	const struct qed_common_ops *common;
+
+	int (*fill_dev_info)(struct qed_dev *cdev,
+			     struct qed_dev_eth_info *info);
+
+	void (*register_ops)(struct qed_dev *cdev,
+			     struct qed_eth_cb_ops *ops,
+			     void *cookie);
+
+	int (*vport_start)(struct qed_dev *cdev,
+			   u8 vport_id, u16 mtu,
+			   u8 drop_ttl0_flg,
+			   u8 inner_vlan_removal_en_flg);
+
+	int (*vport_stop)(struct qed_dev *cdev,
+			  u8 vport_id);
+
+	int (*vport_update)(struct qed_dev *cdev,
+			    struct qed_update_vport_params *params);
+
+	int (*q_rx_start)(struct qed_dev *cdev,
+			  struct qed_queue_start_common_params *params,
+			  u16 bd_max_bytes,
+			  dma_addr_t bd_chain_phys_addr,
+			  dma_addr_t cqe_pbl_addr,
+			  u16 cqe_pbl_size,
+			  void __iomem **pp_prod);
+
+	int (*q_rx_stop)(struct qed_dev *cdev,
+			 struct qed_stop_rxq_params *params);
+
+	int (*q_tx_start)(struct qed_dev *cdev,
+			  struct qed_queue_start_common_params *params,
+			  dma_addr_t pbl_addr,
+			  u16 pbl_size,
+			  void __iomem **pp_doorbell);
+
+	int (*q_tx_stop)(struct qed_dev *cdev,
+			 struct qed_stop_txq_params *params);
+
+	int (*filter_config)(struct qed_dev *cdev,
+			     struct qed_filter_params *params);
+
+	int (*fastpath_stop)(struct qed_dev *cdev);
+
+	int (*eth_cqe_completion)(struct qed_dev *cdev,
+				  u8 rss_id,
+				  struct eth_slow_path_rx_cqe *cqe);
+
+	void (*get_vport_stats)(struct qed_dev *cdev,
+				struct qed_eth_stats *stats);
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(u32 version);
+void qed_put_eth_ops(void);
+
+#endif
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
new file mode 100644
index 0000000..dc9a1353
--- /dev/null
+++ b/include/linux/qed/qed_if.h
@@ -0,0 +1,498 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_IF_H
+#define _QED_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_chain.h>
+
+#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
+					    (void __iomem *)(reg_addr))
+
+#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+
+#define QED_COALESCE_MAX 0xFF
+
+/* forward */
+struct qed_dev;
+
+struct qed_eth_pf_params {
+	/* The following parameters are used during HW-init and need to be
+	 * passed as arguments to the update_pf_params routine invoked
+	 * before slowpath start.
+	 */
+	u16 num_cons;
+};
+
+struct qed_pf_params {
+	struct qed_eth_pf_params eth_pf_params;
+};
+
+enum qed_int_mode {
+	QED_INT_MODE_INTA,
+	QED_INT_MODE_MSIX,
+	QED_INT_MODE_MSI,
+	QED_INT_MODE_POLL,
+};
+
+struct qed_sb_info {
+	struct status_block	*sb_virt;
+	dma_addr_t		sb_phys;
+	u32			sb_ack; /* Last given ack */
+	u16			igu_sb_id;
+	void __iomem		*igu_addr;
+	u8			flags;
+#define QED_SB_INFO_INIT        0x1
+#define QED_SB_INFO_SETUP       0x2
+
+	struct qed_dev		*cdev;
+};
+
+struct qed_dev_info {
+	unsigned long	pci_mem_start;
+	unsigned long	pci_mem_end;
+	unsigned int	pci_irq;
+	u8		num_hwfns;
+
+	u8		hw_mac[ETH_ALEN];
+	bool		is_mf;
+
+	/* FW version */
+	u16		fw_major;
+	u16		fw_minor;
+	u16		fw_rev;
+	u16		fw_eng;
+
+	/* MFW version */
+	u32		mfw_rev;
+
+	u32		flash_size;
+	u8		mf_mode;
+};
+
+enum qed_sb_type {
+	QED_SB_TYPE_L2_QUEUE,
+};
+
+enum qed_protocol {
+	QED_PROTOCOL_ETH,
+};
+
+struct qed_link_params {
+	bool	link_up;
+
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
+	u32	override_flags;
+	bool	autoneg;
+	u32	adv_speeds;
+	u32	forced_speed;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
+	u32	pause_config;
+};
+
+struct qed_link_output {
+	bool	link_up;
+
+	u32	supported_caps;         /* In SUPPORTED defs */
+	u32	advertised_caps;        /* In ADVERTISED defs */
+	u32	lp_caps;                /* In ADVERTISED defs */
+	u32	speed;                  /* In Mb/s */
+	u8	duplex;                 /* In DUPLEX defs */
+	u8	port;                   /* In PORT defs */
+	bool	autoneg;
+	u32	pause_config;
+};
+
+#define QED_DRV_VER_STR_SIZE 12
+struct qed_slowpath_params {
+	u32	int_mode;
+	u8	drv_major;
+	u8	drv_minor;
+	u8	drv_rev;
+	u8	drv_eng;
+	u8	name[QED_DRV_VER_STR_SIZE];
+};
+
+#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+
+struct qed_int_info {
+	struct msix_entry	*msix;
+	u8			msix_cnt;
+
+	/* This should be updated by the protocol driver */
+	u8			used_cnt;
+};
+
+struct qed_common_cb_ops {
+	void	(*link_update)(void			*dev,
+			       struct qed_link_output	*link);
+};
+
+struct qed_common_ops {
+	struct qed_dev*	(*probe)(struct pci_dev *dev,
+				 enum qed_protocol protocol,
+				 u32 dp_module, u8 dp_level);
+
+	void		(*remove)(struct qed_dev *cdev);
+
+	int		(*set_power_state)(struct qed_dev *cdev,
+					   pci_power_t state);
+
+	void		(*set_id)(struct qed_dev *cdev,
+				  char name[],
+				  char ver_str[]);
+
+	/* Client drivers need to make this call before slowpath_start.
+	 * PF params required for the call before slowpath_start is
+	 * documented within the qed_pf_params structure definition.
+	 */
+	void		(*update_pf_params)(struct qed_dev *cdev,
+					    struct qed_pf_params *params);
+	int		(*slowpath_start)(struct qed_dev *cdev,
+					  struct qed_slowpath_params *params);
+
+	int		(*slowpath_stop)(struct qed_dev *cdev);
+
+	/* Requests to use `cnt' interrupts for fastpath.
+	 * Upon success, returns the number of interrupts allocated for fastpath.
+	 */
+	int		(*set_fp_int)(struct qed_dev *cdev,
+				      u16 cnt);
+
+	/* Fills `info' with pointers required for utilizing interrupts */
+	int		(*get_fp_int)(struct qed_dev *cdev,
+				      struct qed_int_info *info);
+
+	u32		(*sb_init)(struct qed_dev *cdev,
+				   struct qed_sb_info *sb_info,
+				   void *sb_virt_addr,
+				   dma_addr_t sb_phy_addr,
+				   u16 sb_id,
+				   enum qed_sb_type type);
+
+	u32		(*sb_release)(struct qed_dev *cdev,
+				      struct qed_sb_info *sb_info,
+				      u16 sb_id);
+
+	void		(*simd_handler_config)(struct qed_dev *cdev,
+					       void *token,
+					       int index,
+					       void (*handler)(void *));
+
+	void		(*simd_handler_clean)(struct qed_dev *cdev,
+					      int index);
+/**
+ * @brief set_link - set the link according to params
+ *
+ * @param cdev
+ * @param params - values used to override the default link configuration
+ *
+ * @return 0 on success, error otherwise.
+ */
+	int		(*set_link)(struct qed_dev *cdev,
+				    struct qed_link_params *params);
+
+/**
+ * @brief get_link - returns the current link state.
+ *
+ * @param cdev
+ * @param if_link - structure to be filled with current link configuration.
+ */
+	void		(*get_link)(struct qed_dev *cdev,
+				    struct qed_link_output *if_link);
+
+/**
+ * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ *
+ * @param cdev
+ */
+	int		(*drain)(struct qed_dev *cdev);
+
+/**
+ * @brief update_msglvl - update module debug level
+ *
+ * @param cdev
+ * @param dp_module
+ * @param dp_level
+ */
+	void		(*update_msglvl)(struct qed_dev *cdev,
+					 u32 dp_module,
+					 u8 dp_level);
+
+	int		(*chain_alloc)(struct qed_dev *cdev,
+				       enum qed_chain_use_mode intended_use,
+				       enum qed_chain_mode mode,
+				       u16 num_elems,
+				       size_t elem_size,
+				       struct qed_chain *p_chain);
+
+	void		(*chain_free)(struct qed_dev *cdev,
+				      struct qed_chain *p_chain);
+};
+
+/**
+ * @brief qed_get_protocol_version
+ *
+ * @param protocol
+ *
+ * @return version supported by qed for given protocol driver
+ */
+u32 qed_get_protocol_version(enum qed_protocol protocol);
+
+#define MASK_FIELD(_name, _value) \
+	((_value) &= (_name ## _MASK))
+
+#define FIELD_VALUE(_name, _value) \
+	((_value & _name ## _MASK) << _name ## _SHIFT)
+
+#define SET_FIELD(value, name, flag)			       \
+	do {						       \
+		(value) &= ~(name ## _MASK << name ## _SHIFT); \
+		(value) |= (((u64)flag) << (name ## _SHIFT));  \
+	} while (0)
+
+#define GET_FIELD(value, name) \
+	(((value) >> (name ## _SHIFT)) & name ## _MASK)
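A small worked example of the accessors against the CAU_PI_ENTRY layout from common_hsi.h (the function is hypothetical):

static u32 example_pack_cau_pi(u16 prod_val, u8 timeset)
{
	u32 prod = 0;

	SET_FIELD(prod, CAU_PI_ENTRY_PROD_VAL, prod_val);
	SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);

	/* prod_val = 0x1234, timeset = 0x7f yields prod = 0x007f1234;
	 * GET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET) then returns 0x7f.
	 */
	return prod;
}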
+
+/* Debug print definitions */
+#define DP_ERR(cdev, fmt, ...)						     \
+		pr_err("[%s:%d(%s)]" fmt,				     \
+		       __func__, __LINE__,				     \
+		       DP_NAME(cdev) ? DP_NAME(cdev) : "",		     \
+		       ## __VA_ARGS__)
+
+#define DP_NOTICE(cdev, fmt, ...)				      \
+	do {							      \
+		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
+			pr_notice("[%s:%d(%s)]" fmt,		      \
+				  __func__, __LINE__,		      \
+				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+				  ## __VA_ARGS__);		      \
+								      \
+		}						      \
+	} while (0)
+
+#define DP_INFO(cdev, fmt, ...)					      \
+	do {							      \
+		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
+			pr_notice("[%s:%d(%s)]" fmt,		      \
+				  __func__, __LINE__,		      \
+				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
+				  ## __VA_ARGS__);		      \
+		}						      \
+	} while (0)
+
+#define DP_VERBOSE(cdev, module, fmt, ...)				\
+	do {								\
+		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
+			     ((cdev)->dp_module & module))) {		\
+			pr_notice("[%s:%d(%s)]" fmt,			\
+				  __func__, __LINE__,			\
+				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
+				  ## __VA_ARGS__);			\
+		}							\
+	} while (0)
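In use, each call site names its module bit so verbose prints can be gated independently of the level; an illustrative fragment (cdev and cid are whatever the caller has in scope):

	/* emitted only when dp_level is VERBOSE and QED_MSG_SPQ is enabled */
	DP_VERBOSE(cdev, QED_MSG_SPQ, "ramrod posted on cid %08x\n", cid);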
+
+enum DP_LEVEL {
+	QED_LEVEL_VERBOSE	= 0x0,
+	QED_LEVEL_INFO		= 0x1,
+	QED_LEVEL_NOTICE	= 0x2,
+	QED_LEVEL_ERR		= 0x3,
+};
+
+#define QED_LOG_LEVEL_SHIFT     (30)
+#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
+#define QED_LOG_INFO_MASK       (0x40000000)
+#define QED_LOG_NOTICE_MASK     (0x80000000)
+
+enum DP_MODULE {
+	QED_MSG_SPQ	= 0x10000,
+	QED_MSG_STATS	= 0x20000,
+	QED_MSG_DCB	= 0x40000,
+	QED_MSG_IOV	= 0x80000,
+	QED_MSG_SP	= 0x100000,
+	QED_MSG_STORAGE = 0x200000,
+	QED_MSG_CXT	= 0x800000,
+	QED_MSG_ILT	= 0x2000000,
+	QED_MSG_ROCE	= 0x4000000,
+	QED_MSG_DEBUG	= 0x8000000,
+	/* to be added...up to 0x8000000 */
+};
+
+struct qed_eth_stats {
+	u64	no_buff_discards;
+	u64	packet_too_big_discard;
+	u64	ttl0_discard;
+	u64	rx_ucast_bytes;
+	u64	rx_mcast_bytes;
+	u64	rx_bcast_bytes;
+	u64	rx_ucast_pkts;
+	u64	rx_mcast_pkts;
+	u64	rx_bcast_pkts;
+	u64	mftag_filter_discards;
+	u64	mac_filter_discards;
+	u64	tx_ucast_bytes;
+	u64	tx_mcast_bytes;
+	u64	tx_bcast_bytes;
+	u64	tx_ucast_pkts;
+	u64	tx_mcast_pkts;
+	u64	tx_bcast_pkts;
+	u64	tx_err_drop_pkts;
+	u64	tpa_coalesced_pkts;
+	u64	tpa_coalesced_events;
+	u64	tpa_aborts_num;
+	u64	tpa_not_coalesced_pkts;
+	u64	tpa_coalesced_bytes;
+
+	/* port */
+	u64	rx_64_byte_packets;
+	u64	rx_127_byte_packets;
+	u64	rx_255_byte_packets;
+	u64	rx_511_byte_packets;
+	u64	rx_1023_byte_packets;
+	u64	rx_1518_byte_packets;
+	u64	rx_1522_byte_packets;
+	u64	rx_2047_byte_packets;
+	u64	rx_4095_byte_packets;
+	u64	rx_9216_byte_packets;
+	u64	rx_16383_byte_packets;
+	u64	rx_crc_errors;
+	u64	rx_mac_crtl_frames;
+	u64	rx_pause_frames;
+	u64	rx_pfc_frames;
+	u64	rx_align_errors;
+	u64	rx_carrier_errors;
+	u64	rx_oversize_packets;
+	u64	rx_jabbers;
+	u64	rx_undersize_packets;
+	u64	rx_fragments;
+	u64	tx_64_byte_packets;
+	u64	tx_65_to_127_byte_packets;
+	u64	tx_128_to_255_byte_packets;
+	u64	tx_256_to_511_byte_packets;
+	u64	tx_512_to_1023_byte_packets;
+	u64	tx_1024_to_1518_byte_packets;
+	u64	tx_1519_to_2047_byte_packets;
+	u64	tx_2048_to_4095_byte_packets;
+	u64	tx_4096_to_9216_byte_packets;
+	u64	tx_9217_to_16383_byte_packets;
+	u64	tx_pause_frames;
+	u64	tx_pfc_frames;
+	u64	tx_lpi_entry_count;
+	u64	tx_total_collisions;
+	u64	brb_truncates;
+	u64	brb_discards;
+	u64	rx_mac_bytes;
+	u64	rx_mac_uc_packets;
+	u64	rx_mac_mc_packets;
+	u64	rx_mac_bc_packets;
+	u64	rx_mac_frames_ok;
+	u64	tx_mac_bytes;
+	u64	tx_mac_uc_packets;
+	u64	tx_mac_mc_packets;
+	u64	tx_mac_bc_packets;
+	u64	tx_mac_ctrl_frames;
+};
+
+#define QED_SB_IDX              0x0002
+
+#define RX_PI           0
+#define TX_PI(tc)       (RX_PI + 1 + tc)
+
+static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
+{
+	u32 prod = 0;
+	u16 rc = 0;
+
+	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
+	       STATUS_BLOCK_PROD_INDEX_MASK;
+	if (sb_info->sb_ack != prod) {
+		sb_info->sb_ack = prod;
+		rc |= QED_SB_IDX;
+	}
+
+	/* Let SB update */
+	mmiowb();
+	return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ *        written to the IGU.
+ *
+ * @param sb_info       - This is the structure allocated and
+ *                 initialized per status block. Assumption is
+ *                 that it was initialized using qed_sb_init
+ * @param int_cmd       - Enable/Disable/Nop
+ * @param upd_flg       - whether igu consumer should be
+ *                 updated.
+ *
+ * @return inline void
+ */
+static inline void qed_sb_ack(struct qed_sb_info *sb_info,
+			      enum igu_int_cmd int_cmd,
+			      u8 upd_flg)
+{
+	struct igu_prod_cons_update igu_ack = { 0 };
+
+	igu_ack.sb_id_and_flags =
+		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+		 (IGU_SEG_ACCESS_REG <<
+		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+
+	/* Both segments (interrupts & acks) are written to the same address;
+	 * Need to guarantee all commands will be received (in-order) by HW.
+	 */
+	mmiowb();
+	barrier();
+}
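Pulling the two helpers together, a fastpath interrupt handler would look roughly like this (hypothetical sketch; the ring polling is elided):

static irqreturn_t example_sb_isr(int irq, void *dev_instance)
{
	struct qed_sb_info *sb_info = dev_instance;
	u16 rc = qed_sb_update_sb_idx(sb_info);

	if (rc & QED_SB_IDX) {
		/* the FW producer advanced - poll the fastpath rings here */
	}

	/* ack the new index and re-enable the interrupt line */
	qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
	return IRQ_HANDLED;
}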
+
+static inline void __internal_ram_wr(void *p_hwfn,
+				     void __iomem *addr,
+				     int size,
+				     u32 *data)
+{
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*data); i++)
+		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
+}
+
+static inline void internal_ram_wr(void __iomem *addr,
+				   int size,
+				   u32 *data)
+{
+	__internal_ram_wr(NULL, addr, size, data);
+}
+
+#endif
diff --git a/include/linux/random.h b/include/linux/random.h
index e651874..a75840c 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/list.h>
+#include <linux/once.h>
+
 #include <uapi/linux/random.h>
 
 struct random_ready_callback {
@@ -45,6 +47,10 @@
 
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state)			\
+	DO_ONCE(prandom_seed_full_state, (pcpu_state))
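A typical caller pattern might look like the following sketch (the per-CPU variable and function are hypothetical; DEFINE_PER_CPU and get_cpu_var come from <linux/percpu.h>):

static DEFINE_PER_CPU(struct rnd_state, example_rnd_state);

static u32 example_random_u32(void)
{
	struct rnd_state *state;
	u32 res;

	/* seeds every CPU's state exactly once, on first use */
	prandom_init_once(&example_rnd_state);

	state = &get_cpu_var(example_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(example_rnd_state);
	return res;
}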
 
 /**
  * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index ff47651..581abf8 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -230,12 +230,11 @@
 		   struct rcu_synchronize *rs_array);
 
 #define _wait_rcu_gp(checktiny, ...) \
-do { \
-	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
-	const int __n = ARRAY_SIZE(__crcu_array); \
-	struct rcu_synchronize __rs_array[__n]; \
-	\
-	__wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+do {									\
+	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
+	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
+	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
+			__crcu_array, __rs_array);			\
 } while (0)
 
 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 8fc0bfd..b49d413 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -296,6 +296,8 @@
 				  unsigned int *val);
 typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
 				   unsigned int val);
+typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
+					 unsigned int mask, unsigned int val);
 typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
 typedef void (*regmap_hw_free_context)(void *context);
 
@@ -335,6 +337,7 @@
 	regmap_hw_gather_write gather_write;
 	regmap_hw_async_write async_write;
 	regmap_hw_reg_write reg_write;
+	regmap_hw_reg_update_bits reg_update_bits;
 	regmap_hw_read read;
 	regmap_hw_reg_read reg_read;
 	regmap_hw_free_context free_context;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 39adaa9..4be5048 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -33,11 +33,11 @@
 extern struct mutex net_mutex;
 
 #ifdef CONFIG_PROVE_LOCKING
-extern int lockdep_rtnl_is_held(void);
+extern bool lockdep_rtnl_is_held(void);
 #else
-static inline int lockdep_rtnl_is_held(void)
+static inline bool lockdep_rtnl_is_held(void)
 {
-	return 1;
+	return true;
 }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7b9501..4817df5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -840,7 +840,7 @@
 	struct hlist_node uidhash_node;
 	kuid_t uid;
 
-#ifdef CONFIG_PERF_EVENTS
+#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
 	atomic_long_t locked_vm;
 #endif
 };
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index f426503..2296e6b 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,4 +95,15 @@
 	return;
 }
 #endif /* CONFIG_SECCOMP_FILTER */
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+extern long seccomp_get_filter(struct task_struct *task,
+			       unsigned long filter_off, void __user *data);
+#else
+static inline long seccomp_get_filter(struct task_struct *task,
+				      unsigned long n, void __user *data)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
 #endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2b0a30a..24f4dfd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -463,6 +463,15 @@
 	return delta_us;
 }
 
+/* Returns true if t1 is after t0; the signed subtraction keeps the
+ * comparison correct across stamp_jiffies wraparound, like time_after().
+ */
+static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
+				    const struct skb_mstamp *t0)
+{
+	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
+
+	if (!diff)
+		diff = t1->stamp_us - t0->stamp_us;
+	return diff > 0;
+}
 
 /** 
  *	struct sk_buff - socket buffer
@@ -2708,7 +2717,7 @@
 	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
 	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-		 skb_checksum_start_offset(skb) <= len)
+		 skb_checksum_start_offset(skb) < 0)
 		skb->ip_summed = CHECKSUM_NONE;
 }
 
diff --git a/include/linux/string.h b/include/linux/string.h
index a8d90db..9ef7795 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -25,6 +25,9 @@
 #ifndef __HAVE_ARCH_STRLCPY
 size_t strlcpy(char *, const char *, size_t);
 #endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
 #endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fcb573b..c906f45 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -194,6 +194,12 @@
 	u32	window_clamp;	/* Maximal window to advertise		*/
 	u32	rcv_ssthresh;	/* Current window clamp			*/
 
+	/* Information of the most recently (s)acked skb */
+	struct tcp_rack {
+		struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+		u8 advanced; /* mstamp advanced since last lost marking */
+		u8 reord;    /* reordering detected */
+	} rack;
 	u16	advmss;		/* Advertised MSS			*/
 	u8	unused;
 	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
@@ -217,6 +223,9 @@
 	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/
 	u32	rttvar_us;	/* smoothed mdev_max			*/
 	u32	rtt_seq;	/* sequence number to update rttvar	*/
+	struct rtt_meas {
+		u32 rtt, ts;	/* RTT in usec and sampling time in jiffies. */
+	} rtt_min[3];
 
 	u32	packets_out;	/* Packets which are "in flight"	*/
 	u32	retrans_out;	/* Retransmitted packets out		*/
@@ -280,8 +289,6 @@
 	int     lost_cnt_hint;
 	u32     retransmit_high;	/* L-bits may be on up to this seqno */
 
-	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */
-
 	u32	prior_ssthresh; /* ssthresh saved at recovery start	*/
 	u32	high_seq;	/* snd_nxt at onset of congestion	*/
 
@@ -356,8 +363,8 @@
 
 struct tcp_timewait_sock {
 	struct inet_timewait_sock tw_sk;
-	u32			  tw_rcv_nxt;
-	u32			  tw_snd_nxt;
+#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
+#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
 	u32			  tw_rcv_wnd;
 	u32			  tw_ts_offset;
 	u32			  tw_ts_recent;
@@ -382,25 +389,12 @@
 		tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
-	struct request_sock_queue *queue =
-	    &inet_csk(sk)->icsk_accept_queue;
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
 
-	if (queue->fastopenq == NULL) {
-		queue->fastopenq = kzalloc(
-		    sizeof(struct fastopen_queue),
-		    sk->sk_allocation);
-		if (queue->fastopenq == NULL)
-			return -ENOMEM;
-
-		sk->sk_destruct = tcp_sock_destruct;
-		spin_lock_init(&queue->fastopenq->lock);
-	}
-	queue->fastopenq->max_qlen = backlog;
-	return 0;
+	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
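A sketch of the clamping behaviour of the reworked fastopen_queue_tune(),
with a hypothetical sysctl value: the fastopen backlog can no longer exceed
net.core.somaxconn.

/* With net.core.somaxconn = 128, a listener asking for 1024 gets 128. */
fastopen_queue_tune(sk, 1024);
/* inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen is now 128 */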
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 3dd5a78..bfb7472 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -157,7 +157,7 @@
 	 */
 	int pio_dma_border; /* default is 64byte */
 
-	u32 type;
+	uintptr_t type;
 	u32 enable_gpio;
 
 	/*
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index eeae5eb..cf3bc56 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -56,75 +56,23 @@
 #include <net/ipv6.h>
 #include <net/net_namespace.h>
 
-#define UIP_802154_SHORTADDR_LEN	2  /* compressed ipv6 address length */
-#define UIP_IPH_LEN			40 /* ipv6 fixed header size */
-#define UIP_PROTO_UDP			17 /* ipv6 next header value for UDP */
-#define UIP_FRAGH_LEN			8  /* ipv6 fragment header size */
+#define EUI64_ADDR_LEN		8
 
-/*
- * ipv6 address based on mac
- * second bit-flip (Universe/Local) is done according RFC2464
+#define LOWPAN_NHC_MAX_ID_LEN	1
+/* Maximum next header compression length which we currently support,
+ * including possible inline data.
  */
-#define is_addr_mac_addr_based(a, m) \
-	((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&	\
-	 (((a)->s6_addr[9])  == (m)[1]) &&		\
-	 (((a)->s6_addr[10]) == (m)[2]) &&		\
-	 (((a)->s6_addr[11]) == (m)[3]) &&		\
-	 (((a)->s6_addr[12]) == (m)[4]) &&		\
-	 (((a)->s6_addr[13]) == (m)[5]) &&		\
-	 (((a)->s6_addr[14]) == (m)[6]) &&		\
-	 (((a)->s6_addr[15]) == (m)[7]))
-
-/*
- * check whether we can compress the IID to 16 bits,
- * it's possible for unicast adresses with first 49 bits are zero only.
+#define LOWPAN_NHC_MAX_HDR_LEN	(sizeof(struct udphdr))
+/* Max IPHC Header len without IPv6 hdr specific inline data.
+ * Useful for getting the "extra" bytes we need at worst case compression.
+ *
+ * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
  */
-#define lowpan_is_iid_16_bit_compressable(a)	\
-	((((a)->s6_addr16[4]) == 0) &&		\
-	 (((a)->s6_addr[10]) == 0) &&		\
-	 (((a)->s6_addr[11]) == 0xff) &&	\
-	 (((a)->s6_addr[12]) == 0xfe) &&	\
-	 (((a)->s6_addr[13]) == 0))
-
-/* check whether the 112-bit gid of the multicast address is mappable to: */
-
-/* 48 bits, FFXX::00XX:XXXX:XXXX */
-#define lowpan_is_mcast_addr_compressable48(a)	\
-	((((a)->s6_addr16[1]) == 0) &&		\
-	 (((a)->s6_addr16[2]) == 0) &&		\
-	 (((a)->s6_addr16[3]) == 0) &&		\
-	 (((a)->s6_addr16[4]) == 0) &&		\
-	 (((a)->s6_addr[10]) == 0))
-
-/* 32 bits, FFXX::00XX:XXXX */
-#define lowpan_is_mcast_addr_compressable32(a)	\
-	((((a)->s6_addr16[1]) == 0) &&		\
-	 (((a)->s6_addr16[2]) == 0) &&		\
-	 (((a)->s6_addr16[3]) == 0) &&		\
-	 (((a)->s6_addr16[4]) == 0) &&		\
-	 (((a)->s6_addr16[5]) == 0) &&		\
-	 (((a)->s6_addr[12]) == 0))
-
-/* 8 bits, FF02::00XX */
-#define lowpan_is_mcast_addr_compressable8(a)	\
-	((((a)->s6_addr[1])  == 2) &&		\
-	 (((a)->s6_addr16[1]) == 0) &&		\
-	 (((a)->s6_addr16[2]) == 0) &&		\
-	 (((a)->s6_addr16[3]) == 0) &&		\
-	 (((a)->s6_addr16[4]) == 0) &&		\
-	 (((a)->s6_addr16[5]) == 0) &&		\
-	 (((a)->s6_addr16[6]) == 0) &&		\
-	 (((a)->s6_addr[14]) == 0))
-
-#define lowpan_is_addr_broadcast(a)	\
-	((((a)[0]) == 0xFF) &&	\
-	 (((a)[1]) == 0xFF) &&	\
-	 (((a)[2]) == 0xFF) &&	\
-	 (((a)[3]) == 0xFF) &&	\
-	 (((a)[4]) == 0xFF) &&	\
-	 (((a)[5]) == 0xFF) &&	\
-	 (((a)[6]) == 0xFF) &&	\
-	 (((a)[7]) == 0xFF))
+#define LOWPAN_IPHC_MAX_HEADER_LEN	(2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
+/* Maximum worst case IPHC header buffer size */
+#define LOWPAN_IPHC_MAX_HC_BUF_LEN	(sizeof(struct ipv6hdr) +	\
+					 LOWPAN_IPHC_MAX_HEADER_LEN +	\
+					 LOWPAN_NHC_MAX_HDR_LEN)
 
 #define LOWPAN_DISPATCH_IPV6		0x41 /* 01000001 = 65 */
 #define LOWPAN_DISPATCH_IPHC		0x60 /* 011xxxxx = ... */
@@ -140,69 +88,6 @@
 	return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
 }
 
-#define LOWPAN_FRAG_TIMEOUT	(HZ * 60)	/* time-out 60 sec */
-
-#define LOWPAN_FRAG1_HEAD_SIZE	0x4
-#define LOWPAN_FRAGN_HEAD_SIZE	0x5
-
-/*
- * Values of fields within the IPHC encoding first byte
- * (C stands for compressed and I for inline)
- */
-#define LOWPAN_IPHC_TF		0x18
-
-#define LOWPAN_IPHC_FL_C	0x10
-#define LOWPAN_IPHC_TC_C	0x08
-#define LOWPAN_IPHC_NH_C	0x04
-#define LOWPAN_IPHC_TTL_1	0x01
-#define LOWPAN_IPHC_TTL_64	0x02
-#define LOWPAN_IPHC_TTL_255	0x03
-#define LOWPAN_IPHC_TTL_I	0x00
-
-
-/* Values of fields within the IPHC encoding second byte */
-#define LOWPAN_IPHC_CID		0x80
-
-#define LOWPAN_IPHC_ADDR_00	0x00
-#define LOWPAN_IPHC_ADDR_01	0x01
-#define LOWPAN_IPHC_ADDR_02	0x02
-#define LOWPAN_IPHC_ADDR_03	0x03
-
-#define LOWPAN_IPHC_SAC		0x40
-#define LOWPAN_IPHC_SAM		0x30
-
-#define LOWPAN_IPHC_SAM_BIT	4
-
-#define LOWPAN_IPHC_M		0x08
-#define LOWPAN_IPHC_DAC		0x04
-#define LOWPAN_IPHC_DAM_00	0x00
-#define LOWPAN_IPHC_DAM_01	0x01
-#define LOWPAN_IPHC_DAM_10	0x02
-#define LOWPAN_IPHC_DAM_11	0x03
-
-#define LOWPAN_IPHC_DAM_BIT	0
-/*
- * LOWPAN_UDP encoding (works together with IPHC)
- */
-#define LOWPAN_NHC_UDP_MASK		0xF8
-#define LOWPAN_NHC_UDP_ID		0xF0
-#define LOWPAN_NHC_UDP_CHECKSUMC	0x04
-#define LOWPAN_NHC_UDP_CHECKSUMI	0x00
-
-#define LOWPAN_NHC_UDP_4BIT_PORT	0xF0B0
-#define LOWPAN_NHC_UDP_4BIT_MASK	0xFFF0
-#define LOWPAN_NHC_UDP_8BIT_PORT	0xF000
-#define LOWPAN_NHC_UDP_8BIT_MASK	0xFF00
-
-/* values for port compression, _with checksum_ ie bit 5 set to 0 */
-#define LOWPAN_NHC_UDP_CS_P_00	0xF0 /* all inline */
-#define LOWPAN_NHC_UDP_CS_P_01	0xF1 /* source 16bit inline,
-					dest = 0xF0 + 8 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_10	0xF2 /* source = 0xF0 + 8bit inline,
-					dest = 16 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_11	0xF3 /* source & dest = 0xF0B + 4bit inline */
-#define LOWPAN_NHC_UDP_CS_C	0x04 /* checksum elided */
-
 #define LOWPAN_PRIV_SIZE(llpriv_size)	\
 	(sizeof(struct lowpan_priv) + llpriv_size)
 
@@ -240,7 +125,7 @@
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
-				   unsigned char *buf, int len)
+				   const unsigned char *buf, int len)
 {
 	if (msg)
 		pr_debug("%s():%s: ", caller, msg);
@@ -255,7 +140,7 @@
  * ...
  */
 static inline void raw_dump_table(const char *caller, char *msg,
-				  unsigned char *buf, int len)
+				  const unsigned char *buf, int len)
 {
 	if (msg)
 		pr_debug("%s():%s:\n", caller, msg);
@@ -264,24 +149,25 @@
 }
 #else
 static inline void raw_dump_table(const char *caller, char *msg,
-				  unsigned char *buf, int len) { }
+				  const unsigned char *buf, int len) { }
 static inline void raw_dump_inline(const char *caller, char *msg,
-				   unsigned char *buf, int len) { }
+				   const unsigned char *buf, int len) { }
 #endif
 
-static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
-	if (unlikely(!pskb_may_pull(skb, 1)))
-		return -EINVAL;
-
-	*val = skb->data[0];
-	skb_pull(skb, 1);
-
-	return 0;
-}
-
-static inline bool lowpan_fetch_skb(struct sk_buff *skb,
-		void *data, const unsigned int len)
+/**
+ * lowpan_fetch_skb - fetch inline data from the 6LoWPAN header
+ *
+ * This function pulls data from the sk buffer into @data, removing the
+ * 6LoWPAN inline data from the buffer. It returns true if the sk buffer
+ * is too small to pull the amount of data specified by @len.
+ *
+ * @skb: the buffer the inline data should be pulled from.
+ * @data: destination buffer for the inline data.
+ * @len: amount of data to pull, in bytes.
+ */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
+				    unsigned int len)
 {
 	if (unlikely(!pskb_may_pull(skb, len)))
 		return true;
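For illustration, the common one-byte case that the removed
lowpan_fetch_skb_u8() used to handle now goes through the generic helper:

u8 dispatch;

if (lowpan_fetch_skb(skb, &dispatch, sizeof(dispatch)))
	return -EINVAL;	/* skb too short */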
@@ -301,14 +187,42 @@
 
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
 
-int
-lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
-			 const u8 *saddr, const u8 saddr_type,
-			 const u8 saddr_len, const u8 *daddr,
-			 const u8 daddr_type, const u8 daddr_len,
-			 u8 iphc0, u8 iphc1);
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-			unsigned short type, const void *_daddr,
-			const void *_saddr, unsigned int len);
+/**
+ * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
+ *
+ * This function replaces the IPHC 6LoWPAN header, which should be pointed
+ * at by skb->data and skb_network_header, with the IPv6 header.
+ * The caller should ideally provide headroom for the IPv6 header plus the
+ * largest transport layer header; this reduces the overhead of
+ * reallocating headroom.
+ *
+ * @skb: the buffer to be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *	methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *	methods.
+ */
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+			     const void *daddr, const void *saddr);
+
+/**
+ * lowpan_header_compress - replace IPv6 header with 6LoWPAN header
+ *
+ * This function replaces the IPv6 header, which should be pointed at by
+ * skb->data and skb_network_header, with the IPHC 6LoWPAN header.
+ * The caller needs to make sure that the sk buffer is not shared and has
+ * at least LOWPAN_IPHC_MAX_HEADER_LEN bytes of headroom, covering the
+ * worst-case IPHC "more bytes than IPv6 header" expansion.
+ *
+ * @skb: the buffer to be manipulated.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *	methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *	methods.
+ */
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+			   const void *daddr, const void *saddr);
 
 #endif /* __6LOWPAN_H__ */
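A hypothetical transmit-path sketch tying the kernel-doc above to
LOWPAN_IPHC_MAX_HEADER_LEN: make sure the worst-case IPHC expansion fits
before compressing. skb_cow_head() also covers the not-shared requirement,
since it copies a cloned header before writing.

static int xmit_compress(struct sk_buff *skb, struct net_device *dev,
			 const void *daddr, const void *saddr)
{
	/* room for the worst-case "more bytes than IPv6 header" expansion */
	if (skb_cow_head(skb, LOWPAN_IPHC_MAX_HEADER_LEN))
		return -ENOMEM;

	return lowpan_header_compress(skb, dev, daddr, saddr);
}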
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 4a167b3..b36d837 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -63,7 +63,11 @@
 #define UNIX_GC_MAYBE_CYCLE	1
 	struct socket_wq	peer_wq;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(const struct sock *sk)
+{
+	return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index f5ade85..42844d7 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -122,11 +122,14 @@
 __printf(1, 2)
 void bt_info(const char *fmt, ...);
 __printf(1, 2)
+void bt_warn(const char *fmt, ...);
+__printf(1, 2)
 void bt_err(const char *fmt, ...);
 __printf(1, 2)
 void bt_err_ratelimited(const char *fmt, ...);
 
 #define BT_INFO(fmt, ...)	bt_info(fmt "\n", ##__VA_ARGS__)
+#define BT_WARN(fmt, ...)	bt_warn(fmt "\n", ##__VA_ARGS__)
 #define BT_ERR(fmt, ...)	bt_err(fmt "\n", ##__VA_ARGS__)
 #define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
 
@@ -134,6 +137,8 @@
 
 #define bt_dev_info(hdev, fmt, ...)				\
 	BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn(hdev, fmt, ...)				\
+	BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
 #define bt_dev_err(hdev, fmt, ...)				\
 	BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
 #define bt_dev_dbg(hdev, fmt, ...)				\
@@ -291,22 +296,22 @@
 typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
 				       u16 opcode, struct sk_buff *skb);
 
-struct req_ctrl {
-	bool start;
-	u8 event;
-	hci_req_complete_t complete;
-	hci_req_complete_skb_t complete_skb;
+struct hci_ctrl {
+	__u16 opcode;
+	bool req_start;
+	u8 req_event;
+	hci_req_complete_t req_complete;
+	hci_req_complete_skb_t req_complete_skb;
 };
 
 struct bt_skb_cb {
 	__u8 pkt_type;
 	__u8 force_active;
-	__u16 opcode;
 	__u16 expect;
 	__u8 incoming:1;
 	union {
 		struct l2cap_ctrl l2cap;
-		struct req_ctrl req;
+		struct hci_ctrl hci;
 	};
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 7ca6690..0205b80 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -44,6 +44,9 @@
 #define HCI_DEV_DOWN			4
 #define HCI_DEV_SUSPEND			5
 #define HCI_DEV_RESUME			6
+#define HCI_DEV_OPEN			7
+#define HCI_DEV_CLOSE			8
+#define HCI_DEV_SETUP			9
 
 /* HCI notify events */
 #define HCI_NOTIFY_CONN_ADD		1
@@ -168,6 +171,15 @@
 	 * during the hdev->setup vendor callback.
 	 */
 	HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+
+	/* When this quirk is set, the enabling of diagnostic mode is
+	 * not persistent over HCI Reset. Every time the controller
+	 * is brought up it needs to be reprogrammed.
+	 *
+	 * This quirk can be set before hci_register_dev is called or
+	 * during the hdev->setup vendor callback.
+	 */
+	HCI_QUIRK_NON_PERSISTENT_DIAG,
 };
 
 /* HCI device flags */
@@ -238,6 +250,7 @@
 	HCI_LE_SCAN_INTERRUPTED,
 
 	HCI_DUT_MODE,
+	HCI_VENDOR_DIAG,
 	HCI_FORCE_BREDR_SMP,
 	HCI_FORCE_STATIC_ADDR,
 
@@ -260,6 +273,7 @@
 #define HCI_ACLDATA_PKT		0x02
 #define HCI_SCODATA_PKT		0x03
 #define HCI_EVENT_PKT		0x04
+#define HCI_DIAG_PKT		0xf0
 #define HCI_VENDOR_PKT		0xff
 
 /* HCI packet types */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 256e673..1878d0a 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -398,6 +398,8 @@
 	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
 	void (*notify)(struct hci_dev *hdev, unsigned int evt);
 	void (*hw_error)(struct hci_dev *hdev, u8 code);
+	int (*post_init)(struct hci_dev *hdev);
+	int (*set_diag)(struct hci_dev *hdev, bool enable);
 	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
 };
 
@@ -469,6 +471,7 @@
 	struct delayed_work auto_accept_work;
 	struct delayed_work idle_work;
 	struct delayed_work le_conn_timeout;
+	struct work_struct  le_scan_cleanup;
 
 	struct device	dev;
 	struct dentry	*debugfs;
@@ -791,6 +794,30 @@
 	return NULL;
 }
 
+static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
+						       bdaddr_t *ba,
+						       __u8 ba_type)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn  *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type != LE_LINK)
+			continue;
+
+		if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+			rcu_read_unlock();
+			return c;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
 							__u8 type, __u16 state)
 {
@@ -1015,9 +1042,6 @@
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr,
 						  u8 addr_type);
-struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
-						    bdaddr_t *addr,
-						    u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
@@ -1066,6 +1090,7 @@
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
 
 void hci_init_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
@@ -1349,6 +1374,9 @@
 
 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
 
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+			     const void *param, u32 timeout);
+
 /* ----- HCI Sockets ----- */
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
@@ -1453,7 +1481,7 @@
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 bool mgmt_powering_down(struct hci_dev *hdev);
 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
-void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
 		   bool persistent);
 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 77d1e57..2b67567 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -39,6 +39,10 @@
 #define HCI_MON_ACL_RX_PKT	5
 #define HCI_MON_SCO_TX_PKT	6
 #define HCI_MON_SCO_RX_PKT	7
+#define HCI_MON_OPEN_INDEX	8
+#define HCI_MON_CLOSE_INDEX	9
+#define HCI_MON_INDEX_INFO	10
+#define HCI_MON_VENDOR_DIAG	11
 
 struct hci_mon_new_index {
 	__u8		type;
@@ -48,4 +52,10 @@
 } __packed;
 #define HCI_MON_NEW_INDEX_SIZE 16
 
+struct hci_mon_index_info {
+	bdaddr_t	bdaddr;
+	__le16		manufacturer;
+} __packed;
+#define HCI_MON_INDEX_INFO_SIZE 8
+
 #endif /* __HCI_MON_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index f0889a2..48155be 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5,6 +5,7 @@
  *
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -858,6 +859,8 @@
 /**
  * enum cfg80211_station_type - the type of station being modified
  * @CFG80211_STA_AP_CLIENT: client of an AP interface
+ * @CFG80211_STA_AP_CLIENT_UNASSOC: client of an AP interface that is still
+ *	unassociated (updating properties for this type of client is permitted)
  * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
  *	the AP MLME in the device
  * @CFG80211_STA_AP_STA: AP station on managed interface
@@ -873,6 +876,7 @@
  */
 enum cfg80211_station_type {
 	CFG80211_STA_AP_CLIENT,
+	CFG80211_STA_AP_CLIENT_UNASSOC,
 	CFG80211_STA_AP_MLME_CLIENT,
 	CFG80211_STA_AP_STA,
 	CFG80211_STA_IBSS,
@@ -1498,13 +1502,26 @@
 };
 
 /**
+ * struct cfg80211_sched_scan_plan - scan plan for scheduled scan
+ *
+ * @interval: interval between scheduled scan iterations. In seconds.
+ * @iterations: number of scan iterations in this scan plan. Zero means
+ *	the plan runs indefinitely.
+ *	The last scan plan will always have this parameter set to zero;
+ *	all other scan plans will have a finite number of iterations.
+ */
+struct cfg80211_sched_scan_plan {
+	u32 interval;
+	u32 iterations;
+};
+
+/**
  * struct cfg80211_sched_scan_request - scheduled scan request description
  *
  * @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
  * @n_ssids: number of SSIDs
  * @n_channels: total number of channels to scan
  * @scan_width: channel width for scanning
- * @interval: interval between each scheduled scan cycle
  * @ie: optional information element(s) to add into Probe Request or %NULL
  * @ie_len: length of ie in octets
  * @flags: bit field of flags controlling operation
@@ -1523,6 +1540,9 @@
  * @mac_addr_mask: MAC address mask used with randomisation, bits that
  *	are 0 in the mask should be randomised, bits that are 1 should
  *	be taken from the @mac_addr
+ * @scan_plans: scan plans to be executed in this scheduled scan. Lowest
+ *	index must be executed first.
+ * @n_scan_plans: number of scan plans, at least 1.
  * @rcu_head: RCU callback used to free the struct
 * @owner_nlportid: netlink portid of owner (if this is a request
  *	owned by a particular socket)
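For example (values hypothetical), a request that scans every 10 seconds for
5 iterations and then every 60 seconds indefinitely carries two plans, with
the infinite plan last as required:

struct cfg80211_sched_scan_plan plans[] = {
	{ .interval = 10, .iterations = 5 },
	{ .interval = 60, .iterations = 0 },	/* last plan runs forever */
};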
@@ -1536,7 +1556,6 @@
 	int n_ssids;
 	u32 n_channels;
 	enum nl80211_bss_scan_width scan_width;
-	u32 interval;
 	const u8 *ie;
 	size_t ie_len;
 	u32 flags;
@@ -1544,6 +1563,8 @@
 	int n_match_sets;
 	s32 min_rssi_thold;
 	u32 delay;
+	struct cfg80211_sched_scan_plan *scan_plans;
+	int n_scan_plans;
 
 	u8 mac_addr[ETH_ALEN] __aligned(2);
 	u8 mac_addr_mask[ETH_ALEN] __aligned(2);
@@ -1573,6 +1594,26 @@
 };
 
 /**
+ * struct cfg80211_inform_bss - BSS inform data
+ * @chan: channel the frame was received on
+ * @scan_width: scan width that was used
+ * @signal: signal strength value, according to the wiphy's
+ *	signal type
+ * @boottime_ns: timestamp (CLOCK_BOOTTIME) when the information was
+ *	received; should match the time when the frame was actually
+ *	received by the device (not just by the host, in case it was
+ *	buffered on the device) and be accurate to about 10ms.
+ *	If the frame isn't buffered, just passing the return value of
+ *	ktime_get_boot_ns() is likely appropriate.
+ */
+struct cfg80211_inform_bss {
+	struct ieee80211_channel *chan;
+	enum nl80211_bss_scan_width scan_width;
+	s32 signal;
+	u64 boottime_ns;
+};
+
+/**
  * struct cfg80211_bss_ie_data - BSS entry IE data
  * @tsf: TSF contained in the frame that carried these IEs
  * @rcu_head: internal use, for freeing
@@ -2971,12 +3012,21 @@
  * @doit: callback for the operation, note that wdev is %NULL if the
  *	flags didn't ask for a wdev and non-%NULL otherwise; the data
  *	pointer may be %NULL if userspace provided no data at all
+ * @dumpit: dump callback, for transferring bigger/multiple items. The
+ *	@storage points to cb->args[5], i.e. it is preserved over multiple
+ *	dumpit calls.
+ * It's recommended not to have the same sub command handled by both
+ * @doit and @dumpit, so that userspace can assume certain commands are
+ * for get requests and others for dump requests.
  */
 struct wiphy_vendor_command {
 	struct nl80211_vendor_cmd_info info;
 	u32 flags;
 	int (*doit)(struct wiphy *wiphy, struct wireless_dev *wdev,
 		    const void *data, int data_len);
+	int (*dumpit)(struct wiphy *wiphy, struct wireless_dev *wdev,
+		      struct sk_buff *skb, const void *data, int data_len,
+		      unsigned long *storage);
 };
 
 /**
@@ -3044,6 +3094,12 @@
  *	include fixed IEs like supported rates
  * @max_sched_scan_ie_len: same as max_scan_ie_len, but for scheduled
  *	scans
+ * @max_sched_scan_plans: maximum number of scan plans (scan interval and number
+ *	of iterations) for scheduled scan supported by the device.
+ * @max_sched_scan_plan_interval: maximum interval (in seconds) for a
+ *	single scan plan supported by the device.
+ * @max_sched_scan_plan_iterations: maximum number of iterations for a single
+ *	scan plan supported by the device.
  * @coverage_class: current coverage class
  * @fw_version: firmware version for ethtool reporting
  * @hw_version: hardware version for ethtool reporting
@@ -3151,6 +3207,9 @@
 	u8 max_match_sets;
 	u16 max_scan_ie_len;
 	u16 max_sched_scan_ie_len;
+	u32 max_sched_scan_plans;
+	u32 max_sched_scan_plan_interval;
+	u32 max_sched_scan_plan_iterations;
 
 	int n_cipher_suites;
 	const u32 *cipher_suites;
@@ -3946,14 +4005,11 @@
 void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
 
 /**
- * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
- *
+ * cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
  * @wiphy: the wiphy reporting the BSS
- * @rx_channel: The channel the frame was received on
- * @scan_width: width of the control channel
+ * @data: the BSS metadata
  * @mgmt: the management frame (probe response or beacon)
  * @len: length of the management frame
- * @signal: the signal strength, type depends on the wiphy's signal_type
  * @gfp: context flags
  *
  * This informs cfg80211 that BSS information was found and
@@ -3963,11 +4019,26 @@
  * Or %NULL on error.
  */
 struct cfg80211_bss * __must_check
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+			       struct cfg80211_inform_bss *data,
+			       struct ieee80211_mgmt *mgmt, size_t len,
+			       gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
 				struct ieee80211_channel *rx_channel,
 				enum nl80211_bss_scan_width scan_width,
 				struct ieee80211_mgmt *mgmt, size_t len,
-				s32 signal, gfp_t gfp);
+				s32 signal, gfp_t gfp)
+{
+	struct cfg80211_inform_bss data = {
+		.chan = rx_channel,
+		.scan_width = scan_width,
+		.signal = signal,
+	};
+
+	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
+}
 
 static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
@@ -3975,9 +4046,13 @@
 			  struct ieee80211_mgmt *mgmt, size_t len,
 			  s32 signal, gfp_t gfp)
 {
-	return cfg80211_inform_bss_width_frame(wiphy, rx_channel,
-					       NL80211_BSS_CHAN_WIDTH_20,
-					       mgmt, len, signal, gfp);
+	struct cfg80211_inform_bss data = {
+		.chan = rx_channel,
+		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
+		.signal = signal,
+	};
+
+	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
 }
 
 /**
@@ -3994,11 +4069,10 @@
 };
 
 /**
- * cfg80211_inform_bss_width - inform cfg80211 of a new BSS
+ * cfg80211_inform_bss_data - inform cfg80211 of a new BSS
  *
  * @wiphy: the wiphy reporting the BSS
- * @rx_channel: The channel the frame was received on
- * @scan_width: width of the control channel
+ * @data: the BSS metadata
  * @ftype: frame type (if known)
  * @bssid: the BSSID of the BSS
  * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
@@ -4006,7 +4080,6 @@
  * @beacon_interval: the beacon interval announced by the peer
  * @ie: additional IEs sent by the peer
  * @ielen: length of the additional IEs
- * @signal: the signal strength, type depends on the wiphy's signal_type
  * @gfp: context flags
  *
  * This informs cfg80211 that BSS information was found and
@@ -4016,13 +4089,32 @@
  * Or %NULL on error.
  */
 struct cfg80211_bss * __must_check
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+			 struct cfg80211_inform_bss *data,
+			 enum cfg80211_bss_frame_type ftype,
+			 const u8 *bssid, u64 tsf, u16 capability,
+			 u16 beacon_interval, const u8 *ie, size_t ielen,
+			 gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_width(struct wiphy *wiphy,
 			  struct ieee80211_channel *rx_channel,
 			  enum nl80211_bss_scan_width scan_width,
 			  enum cfg80211_bss_frame_type ftype,
 			  const u8 *bssid, u64 tsf, u16 capability,
 			  u16 beacon_interval, const u8 *ie, size_t ielen,
-			  s32 signal, gfp_t gfp);
+			  s32 signal, gfp_t gfp)
+{
+	struct cfg80211_inform_bss data = {
+		.chan = rx_channel,
+		.scan_width = scan_width,
+		.signal = signal,
+	};
+
+	return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+					capability, beacon_interval, ie, ielen,
+					gfp);
+}
 
 static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
@@ -4032,11 +4124,15 @@
 		    u16 beacon_interval, const u8 *ie, size_t ielen,
 		    s32 signal, gfp_t gfp)
 {
-	return cfg80211_inform_bss_width(wiphy, rx_channel,
-					 NL80211_BSS_CHAN_WIDTH_20, ftype,
-					 bssid, tsf, capability,
-					 beacon_interval, ie, ielen, signal,
-					 gfp);
+	struct cfg80211_inform_bss data = {
+		.chan = rx_channel,
+		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
+		.signal = signal,
+	};
+
+	return cfg80211_inform_bss_data(wiphy, &data, ftype, bssid, tsf,
+					capability, beacon_interval, ie, ielen,
+					gfp);
 }
 
 struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
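A hypothetical driver RX-path sketch using the new *_data entry point
directly, including the boottime stamp that the old wrappers cannot convey:

struct cfg80211_inform_bss data = {
	.chan = rx_channel,
	.scan_width = NL80211_BSS_CHAN_WIDTH_20,
	.signal = signal_mbm,			/* per the wiphy's signal_type */
	.boottime_ns = ktime_get_boot_ns(),	/* frame was not buffered */
};
struct cfg80211_bss *bss;

bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, GFP_ATOMIC);
if (bss)
	cfg80211_put_bss(wiphy, bss);		/* drop our reference */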
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 76b1ffa..171cd76 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -27,6 +27,16 @@
 struct wpan_phy;
 struct wpan_phy_cca;
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+struct ieee802154_llsec_device_key;
+struct ieee802154_llsec_seclevel;
+struct ieee802154_llsec_params;
+struct ieee802154_llsec_device;
+struct ieee802154_llsec_table;
+struct ieee802154_llsec_key_id;
+struct ieee802154_llsec_key;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 struct cfg802154_ops {
 	struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
 							   const char *name,
@@ -65,6 +75,51 @@
 				struct wpan_dev *wpan_dev, bool mode);
 	int	(*set_ackreq_default)(struct wpan_phy *wpan_phy,
 				      struct wpan_dev *wpan_dev, bool ackreq);
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	void	(*get_llsec_table)(struct wpan_phy *wpan_phy,
+				   struct wpan_dev *wpan_dev,
+				   struct ieee802154_llsec_table **table);
+	void	(*lock_llsec_table)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev);
+	void	(*unlock_llsec_table)(struct wpan_phy *wpan_phy,
+				      struct wpan_dev *wpan_dev);
+	/* TODO remove locking/get table callbacks, this is part of the
+	 * nl802154 interface and should be accessible from ieee802154 layer.
+	 */
+	int	(*get_llsec_params)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev,
+				    struct ieee802154_llsec_params *params);
+	int	(*set_llsec_params)(struct wpan_phy *wpan_phy,
+				    struct wpan_dev *wpan_dev,
+				    const struct ieee802154_llsec_params *params,
+				    int changed);
+	int	(*add_llsec_key)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_key_id *id,
+				 const struct ieee802154_llsec_key *key);
+	int	(*del_llsec_key)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_key_id *id);
+	int	(*add_seclevel)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_seclevel *sl);
+	int	(*del_seclevel)(struct wpan_phy *wpan_phy,
+				 struct wpan_dev *wpan_dev,
+				 const struct ieee802154_llsec_seclevel *sl);
+	int	(*add_device)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      const struct ieee802154_llsec_device *dev);
+	int	(*del_device)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev, __le64 extended_addr);
+	int	(*add_devkey)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      __le64 extended_addr,
+			      const struct ieee802154_llsec_device_key *key);
+	int	(*del_devkey)(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev,
+			      __le64 extended_addr,
+			      const struct ieee802154_llsec_device_key *key);
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
 static inline bool
@@ -167,6 +222,102 @@
 	char priv[0] __aligned(NETDEV_ALIGN);
 };
 
+struct ieee802154_addr {
+	u8 mode;
+	__le16 pan_id;
+	union {
+		__le16 short_addr;
+		__le64 extended_addr;
+	};
+};
+
+struct ieee802154_llsec_key_id {
+	u8 mode;
+	u8 id;
+	union {
+		struct ieee802154_addr device_addr;
+		__le32 short_source;
+		__le64 extended_source;
+	};
+};
+
+#define IEEE802154_LLSEC_KEY_SIZE 16
+
+struct ieee802154_llsec_key {
+	u8 frame_types;
+	u32 cmd_frame_ids;
+	/* TODO replace with NL802154_KEY_SIZE */
+	u8 key[IEEE802154_LLSEC_KEY_SIZE];
+};
+
+struct ieee802154_llsec_key_entry {
+	struct list_head list;
+
+	struct ieee802154_llsec_key_id id;
+	struct ieee802154_llsec_key *key;
+};
+
+struct ieee802154_llsec_params {
+	bool enabled;
+
+	__be32 frame_counter;
+	u8 out_level;
+	struct ieee802154_llsec_key_id out_key;
+
+	__le64 default_key_source;
+
+	__le16 pan_id;
+	__le64 hwaddr;
+	__le64 coord_hwaddr;
+	__le16 coord_shortaddr;
+};
+
+struct ieee802154_llsec_table {
+	struct list_head keys;
+	struct list_head devices;
+	struct list_head security_levels;
+};
+
+struct ieee802154_llsec_seclevel {
+	struct list_head list;
+
+	u8 frame_type;
+	u8 cmd_frame_id;
+	bool device_override;
+	u32 sec_levels;
+};
+
+struct ieee802154_llsec_device {
+	struct list_head list;
+
+	__le16 pan_id;
+	__le16 short_addr;
+	__le64 hwaddr;
+	u32 frame_counter;
+	bool seclevel_exempt;
+
+	u8 key_mode;
+	struct list_head keys;
+};
+
+struct ieee802154_llsec_device_key {
+	struct list_head list;
+
+	struct ieee802154_llsec_key_id key_id;
+	u32 frame_counter;
+};
+
+struct wpan_dev_header_ops {
+	/* TODO create callback currently assumes ieee802154_mac_cb inside
+	 * skb->cb. This should be changed to give these information as
+	 * parameter.
+	 */
+	int	(*create)(struct sk_buff *skb, struct net_device *dev,
+			  const struct ieee802154_addr *daddr,
+			  const struct ieee802154_addr *saddr,
+			  unsigned int len);
+};
+
 struct wpan_dev {
 	struct wpan_phy *wpan_phy;
 	int iftype;
@@ -175,6 +326,8 @@
 	struct list_head list;
 	struct net_device *netdev;
 
+	const struct wpan_dev_header_ops *header_ops;
+
 	/* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
 	struct net_device *lowpan_dev;
 
@@ -205,6 +358,17 @@
 
 #define to_phy(_dev)	container_of(_dev, struct wpan_phy, dev)
 
+static inline int
+wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+		     const struct ieee802154_addr *daddr,
+		     const struct ieee802154_addr *saddr,
+		     unsigned int len)
+{
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+
+	return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len);
+}
+
 struct wpan_phy *
 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
 static inline void wpan_phy_set_dev(struct wpan_phy *phy, struct device *dev)
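A sketch of how an upper layer might build a MAC header through the new
wpan_dev header_ops; daddr, saddr and dgram_size are hypothetical caller
state:

int rc = wpan_dev_hard_header(skb, dev, &daddr, &saddr, dgram_size);

if (rc < 0)
	kfree_skb(skb);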
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b34d812..98ccbde 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -197,6 +197,10 @@
 		return ds->pd->rtable[dst->cpu_switch];
 }
 
+struct switchdev_trans;
+struct switchdev_obj;
+struct switchdev_obj_port_fdb;
+
 struct dsa_switch_driver {
 	struct list_head	list;
 
@@ -316,13 +320,17 @@
 	/*
 	 * Forwarding database
 	 */
+	int	(*port_fdb_prepare)(struct dsa_switch *ds, int port,
+				    const struct switchdev_obj_port_fdb *fdb,
+				    struct switchdev_trans *trans);
 	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
-				const unsigned char *addr, u16 vid);
+				const struct switchdev_obj_port_fdb *fdb,
+				struct switchdev_trans *trans);
 	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
-				const unsigned char *addr, u16 vid);
-	int	(*port_fdb_getnext)(struct dsa_switch *ds, int port,
-				    unsigned char *addr, u16 *vid,
-				    bool *is_static);
+				const struct switchdev_obj_port_fdb *fdb);
+	int	(*port_fdb_dump)(struct dsa_switch *ds, int port,
+				 struct switchdev_obj_port_fdb *fdb,
+				 int (*cb)(struct switchdev_obj *obj));
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
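A hypothetical driver skeleton for the new dump op: the core hands in a
scratch fdb object, the driver fills it per hardware entry and invokes the
callback; entry_addr and entry_vid stand in for driver-specific iteration
state.

static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
			     struct switchdev_obj_port_fdb *fdb,
			     int (*cb)(struct switchdev_obj *obj))
{
	int err = 0;

	/* for each address learned on this port in hardware: */
	ether_addr_copy(fdb->addr, entry_addr);
	fdb->vid = entry_vid;
	fdb->ndm_state = NUD_REACHABLE;
	err = cb(&fdb->obj);

	return err;
}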
diff --git a/include/net/dst.h b/include/net/dst.h
index 779206c..1279f9b 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -45,7 +45,7 @@
 	void			*__pad1;
 #endif
 	int			(*input)(struct sk_buff *);
-	int			(*output)(struct sock *sk, struct sk_buff *skb);
+	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 	unsigned short		flags;
 #define DST_HOST		0x0001
@@ -365,10 +365,10 @@
 	__skb_tunnel_rx(skb, dev, net);
 }
 
-int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
+int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static inline int dst_discard(struct sk_buff *skb)
 {
-	return dst_discard_sk(skb->sk, skb);
+	return dst_discard_out(&init_net, skb->sk, skb);
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
 		int initial_obsolete, unsigned short flags);
@@ -454,13 +454,9 @@
 }
 
 /* Output packet to network from transport.  */
-static inline int dst_output(struct sock *sk, struct sk_buff *skb)
+static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	return skb_dst(skb)->output(sk, skb);
-}
-static inline int dst_output_okfn(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-	return dst_output(sk, skb);
+	return skb_dst(skb)->output(net, sk, skb);
 }
 
 /* Input packet from network to transport.  */
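With the extra net argument, dst_output() now has the same signature as a
netfilter okfn, which is what made the dst_output_okfn() trampoline
removable; a sketch of a typical output hook:

err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb,
	      NULL, skb_dst(skb)->dev, dst_output);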
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index af9d538..ce00971 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -60,6 +60,38 @@
 	return tun_dst;
 }
 
+static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+	int md_size = md_dst->u.tun_info.options_len;
+	struct metadata_dst *new_md;
+
+	if (!md_dst)
+		return ERR_PTR(-EINVAL);
+
+	new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
+	if (!new_md)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
+	       sizeof(struct ip_tunnel_info) + md_size);
+	skb_dst_drop(skb);
+	dst_hold(&new_md->dst);
+	skb_dst_set(skb, &new_md->dst);
+	return new_md;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb)
+{
+	struct metadata_dst *dst;
+
+	dst = tun_dst_unclone(skb);
+	if (IS_ERR(dst))
+		return NULL;
+
+	return &dst->u.tun_info;
+}
+
 static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
 						 __be16 flags,
 						 __be64 tunnel_id,
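A sketch of the intended use of the new helpers: take a private, writable
copy of the tunnel metadata before editing it on the transmit path (the
flag tweak is illustrative).

struct ip_tunnel_info *info = skb_tunnel_info_unclone(skb);

if (!info)
	return -ENOMEM;
info->key.tun_flags |= TUNNEL_CSUM;	/* safe: skb now owns the dst */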
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index d642539..a0d443c 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -9,6 +9,7 @@
 struct net_device;
 struct sk_buff;
 struct sock;
+struct net;
 
 struct dst_ops {
 	unsigned short		family;
@@ -28,7 +29,7 @@
 					       struct sk_buff *skb, u32 mtu);
 	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
 					    struct sk_buff *skb);
-	int			(*local_out)(struct sk_buff *skb);
+	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	struct neighbour *	(*neigh_lookup)(const struct dst_entry *dst,
 						struct sk_buff *skb,
 						const void *daddr);
diff --git a/include/net/flow.h b/include/net/flow.h
index 9b85db8..83969ee 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,7 +34,7 @@
 	__u8	flowic_flags;
 #define FLOWI_FLAG_ANYSRC		0x01
 #define FLOWI_FLAG_KNOWN_NH		0x02
-#define FLOWI_FLAG_VRFSRC		0x04
+#define FLOWI_FLAG_L3MDEV_SRC		0x04
 #define FLOWI_FLAG_SKIP_NH_OIF		0x08
 	__u32	flowic_secid;
 	struct flowi_tunnel flowic_tun_key;
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 2c10a9f..a62a051 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -50,15 +50,6 @@
 	};
 };
 
-struct ieee802154_addr {
-	u8 mode;
-	__le16 pan_id;
-	union {
-		__le16 short_addr;
-		__le64 extended_addr;
-	};
-};
-
 struct ieee802154_hdr_fc {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	u16 type:3,
@@ -99,7 +90,7 @@
  * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame
  * version, if SECEN is set.
  */
-int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr);
+int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr);
 
 /* pulls the entire 802.15.4 header off of the skb, including the security
  * header, and performs pan id decompression
@@ -243,38 +234,6 @@
 	return mac_cb(skb);
 }
 
-#define IEEE802154_LLSEC_KEY_SIZE 16
-
-struct ieee802154_llsec_key_id {
-	u8 mode;
-	u8 id;
-	union {
-		struct ieee802154_addr device_addr;
-		__le32 short_source;
-		__le64 extended_source;
-	};
-};
-
-struct ieee802154_llsec_key {
-	u8 frame_types;
-	u32 cmd_frame_ids;
-	u8 key[IEEE802154_LLSEC_KEY_SIZE];
-};
-
-struct ieee802154_llsec_key_entry {
-	struct list_head list;
-
-	struct ieee802154_llsec_key_id id;
-	struct ieee802154_llsec_key *key;
-};
-
-struct ieee802154_llsec_device_key {
-	struct list_head list;
-
-	struct ieee802154_llsec_key_id key_id;
-	u32 frame_counter;
-};
-
 enum {
 	IEEE802154_LLSEC_DEVKEY_IGNORE,
 	IEEE802154_LLSEC_DEVKEY_RESTRICT,
@@ -283,49 +242,6 @@
 	__IEEE802154_LLSEC_DEVKEY_MAX,
 };
 
-struct ieee802154_llsec_device {
-	struct list_head list;
-
-	__le16 pan_id;
-	__le16 short_addr;
-	__le64 hwaddr;
-	u32 frame_counter;
-	bool seclevel_exempt;
-
-	u8 key_mode;
-	struct list_head keys;
-};
-
-struct ieee802154_llsec_seclevel {
-	struct list_head list;
-
-	u8 frame_type;
-	u8 cmd_frame_id;
-	bool device_override;
-	u32 sec_levels;
-};
-
-struct ieee802154_llsec_params {
-	bool enabled;
-
-	__be32 frame_counter;
-	u8 out_level;
-	struct ieee802154_llsec_key_id out_key;
-
-	__le64 default_key_source;
-
-	__le16 pan_id;
-	__le64 hwaddr;
-	__le64 coord_hwaddr;
-	__le16 coord_shortaddr;
-};
-
-struct ieee802154_llsec_table {
-	struct list_head keys;
-	struct list_head devices;
-	struct list_head security_levels;
-};
-
 #define IEEE802154_MAC_SCAN_ED		0
 #define IEEE802154_MAC_SCAN_ACTIVE	1
 #define IEEE802154_MAC_SCAN_PASSIVE	2
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 81d937e..064cfbe 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -26,16 +26,7 @@
 			    const struct inet_bind_bucket *tb, bool relax);
 
 struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
-				      const struct request_sock *req);
-
-struct request_sock *inet6_csk_search_req(struct sock *sk,
-					  const __be16 rport,
-					  const struct in6_addr *raddr,
-					  const struct in6_addr *laddr,
-					  const int iif);
-
-void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
-				    const unsigned long timeout);
+				      const struct request_sock *req, u8 proto);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 00c3ced..481fe1c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -41,9 +41,11 @@
 	int	    (*rebuild_header)(struct sock *sk);
 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
-	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
+	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
-				      struct dst_entry *dst);
+				      struct dst_entry *dst,
+				      struct request_sock *req_unhash,
+				      bool *own_req);
 	u16	    net_header_len;
 	u16	    net_frag_header_len;
 	u16	    sockaddr_len;
@@ -258,31 +260,25 @@
 
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-struct request_sock *inet_csk_search_req(struct sock *sk,
-					 const __be16 rport,
-					 const __be32 raddr,
-					 const __be32 laddr);
 int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb, bool relax);
 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
 				     const struct request_sock *req);
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+					    struct sock *newsk,
 					    const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-					    struct request_sock *req,
-					    struct sock *child)
-{
-	reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 				   unsigned long timeout);
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+					 struct request_sock *req,
+					 bool own_req);
 
-static inline void inet_csk_reqsk_queue_added(struct sock *sk,
-					      const unsigned long timeout)
+static inline void inet_csk_reqsk_queue_added(struct sock *sk)
 {
 	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 }
@@ -299,10 +295,11 @@
 
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
-	return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
+	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 void inet_csk_destroy_sock(struct sock *sk);
 void inet_csk_prepare_forced_close(struct sock *sk);
@@ -316,7 +313,7 @@
 			(POLLIN | POLLRDNORM) : 0;
 }
 
-int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+int inet_csk_listen_start(struct sock *sk, int backlog);
 void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b07d126..de2e3ad 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -199,13 +199,14 @@
 }
 
 /* Caller must disable local BH processing. */
-int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(const struct sock *sk, struct sock *child);
 
 void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+bool inet_ehash_insert(struct sock *sk, struct sock *osk);
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
 void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 47eb67b..f5bf731 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -245,7 +245,8 @@
 }
 
 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
-				      struct sock *sk_listener);
+				      struct sock *sk_listener,
+				      bool attach_listener);
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 186f3a1..c9b3eb7 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -70,6 +70,7 @@
 #define tw_dport		__tw_common.skc_dport
 #define tw_num			__tw_common.skc_num
 #define tw_cookie		__tw_common.skc_cookie
+#define tw_dr			__tw_common.skc_tw_dr
 
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
@@ -88,7 +89,6 @@
 	kmemcheck_bitfield_end(flags);
 	struct timer_list	tw_timer;
 	struct inet_bind_bucket	*tw_tb;
-	struct inet_timewait_death_row *tw_dr;
 };
 #define tw_tclass tw_tos
 
@@ -113,12 +113,12 @@
 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
 			  bool rearm);
 
-static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
 {
 	__inet_twsk_schedule(tw, timeo, false);
 }
 
-static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
 {
 	__inet_twsk_schedule(tw, timeo, true);
 }
diff --git a/include/net/ip.h b/include/net/ip.h
index 91a6b2c..1a98f1c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -107,17 +107,13 @@
 	   struct net_device *orig_dev);
 int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
-int ip_output(struct sock *sk, struct sk_buff *skb);
-int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
-		   int (*output)(struct sock *, struct sk_buff *));
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct net *, struct sock *, struct sk_buff *));
 void ip_send_check(struct iphdr *ip);
-int __ip_local_out(struct sk_buff *skb);
-int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
-static inline int ip_local_out(struct sk_buff *skb)
-{
-	return ip_local_out_sk(skb->sk, skb);
-}
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 void ip_init(void);
@@ -323,12 +319,15 @@
 
 static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
 {
-	if (!skb->sk || ip_sk_use_pmtu(skb->sk)) {
+	struct sock *sk = skb->sk;
+
+	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
 		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
+
 		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
-	} else {
-		return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
 	}
+
+	return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
 }
 
 u32 ip_idents_reserve(u32 hash, int segs);
@@ -507,11 +506,11 @@
 	return user >= lower_bond && user <= upper_bond;
 }
 
-int ip_defrag(struct sk_buff *skb, u32 user);
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
 #ifdef CONFIG_INET
-struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
 #else
-static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	return skb;
 }
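Callers of the reworked ip_local_out() now pass the netns explicitly rather
than having it derived from skb->sk, mirroring the ip6_local_out()
conversion below; a minimal sketch:

err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);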
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 297629a..2bfb2ad 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -173,8 +173,8 @@
 		 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
 }
 
-int ip6_fragment(struct sock *sk, struct sk_buff *skb,
-		 int (*output)(struct sock *, struct sk_buff *));
+int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		 int (*output)(struct net *, struct sock *, struct sk_buff *));
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fa915fa..aaee6fa 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -87,7 +87,7 @@
 	int pkt_len, err;
 
 	pkt_len = skb->len - skb_inner_network_offset(skb);
-	err = ip6_local_out_sk(sk, skb);
+	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 727d6e9..ac5c6e8 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -79,7 +79,7 @@
 	unsigned char		nh_scope;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	int			nh_weight;
-	int			nh_power;
+	atomic_t		nh_upper_bound;
 #endif
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	__u32			nh_tclassid;
@@ -118,7 +118,7 @@
 #define fib_advmss fib_metrics[RTAX_ADVMSS-1]
 	int			fib_nhs;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	int			fib_power;
+	int			fib_weight;
 #endif
 	struct rcu_head		rcu;
 	struct fib_nh		fib_nh[0];
@@ -320,7 +320,17 @@
 int fib_sync_down_dev(struct net_device *dev, unsigned long event);
 int fib_sync_down_addr(struct net *net, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
-void fib_select_multipath(struct fib_result *res);
+
+extern u32 fib_multipath_secret __read_mostly;
+
+static inline int fib_multipath_hash(__be32 saddr, __be32 daddr)
+{
+	return jhash_2words(saddr, daddr, fib_multipath_secret) >> 1;
+}
+
+void fib_select_multipath(struct fib_result *res, int hash);
+void fib_select_path(struct net *net, struct fib_result *res,
+		     struct flowi4 *fl4, int mp_hash);
 
 /* Exported by fib_trie.c */
 void fib_trie_init(void);
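A sketch of how the forwarding path is expected to combine the two helpers
above, given a completed fib lookup in res and the flow in fl4:

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1) {
		int hash = fib_multipath_hash(fl4->saddr, fl4->daddr);

		fib_select_multipath(&res, hash);
	}
#endif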
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1096a71..0816c87 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -38,59 +38,6 @@
 	return net->ipvs;
 }
 
-/* Get net ptr from skb in traffic cases
- * use skb_sknet when call is from userland (ioctl or netlink)
- */
-static inline struct net *skb_net(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-#ifdef CONFIG_IP_VS_DEBUG
-	/*
-	 * This is used for debug only.
-	 * Start with the most likely hit
-	 * End with BUG
-	 */
-	if (likely(skb->dev && dev_net(skb->dev)))
-		return dev_net(skb->dev);
-	if (skb_dst(skb) && skb_dst(skb)->dev)
-		return dev_net(skb_dst(skb)->dev);
-	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
-		      __func__, __LINE__);
-	if (likely(skb->sk && sock_net(skb->sk)))
-		return sock_net(skb->sk);
-	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
-		__func__, __LINE__);
-	BUG();
-#else
-	return dev_net(skb->dev ? : skb_dst(skb)->dev);
-#endif
-#else
-	return &init_net;
-#endif
-}
-
-static inline struct net *skb_sknet(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-#ifdef CONFIG_IP_VS_DEBUG
-	/* Start with the most likely hit */
-	if (likely(skb->sk && sock_net(skb->sk)))
-		return sock_net(skb->sk);
-	WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
-		       __func__, __LINE__);
-	if (likely(skb->dev && dev_net(skb->dev)))
-		return dev_net(skb->dev);
-	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
-		__func__, __LINE__);
-	BUG();
-#else
-	return sock_net(skb->sk);
-#endif
-#else
-	return &init_net;
-#endif
-}
-
 /* This one needed for single_open_net since net is stored directly in
  * private not as a struct i.e. seq_file_net can't be used.
  */
@@ -483,22 +430,25 @@
 
 	void (*exit)(struct ip_vs_protocol *pp);
 
-	int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+	int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
 
-	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+	void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);
 
-	int (*conn_schedule)(int af, struct sk_buff *skb,
+	int (*conn_schedule)(struct netns_ipvs *ipvs,
+			     int af, struct sk_buff *skb,
 			     struct ip_vs_proto_data *pd,
 			     int *verdict, struct ip_vs_conn **cpp,
 			     struct ip_vs_iphdr *iph);
 
 	struct ip_vs_conn *
-	(*conn_in_get)(int af,
+	(*conn_in_get)(struct netns_ipvs *ipvs,
+		       int af,
 		       const struct sk_buff *skb,
 		       const struct ip_vs_iphdr *iph);
 
 	struct ip_vs_conn *
-	(*conn_out_get)(int af,
+	(*conn_out_get)(struct netns_ipvs *ipvs,
+			int af,
 			const struct sk_buff *skb,
 			const struct ip_vs_iphdr *iph);
 
@@ -517,9 +467,9 @@
 				 const struct sk_buff *skb,
 				 struct ip_vs_proto_data *pd);
 
-	int (*register_app)(struct net *net, struct ip_vs_app *inc);
+	int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
 
-	void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
+	void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);
 
 	int (*app_conn_bind)(struct ip_vs_conn *cp);
 
@@ -541,11 +491,11 @@
 };
 
 struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
-struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
 					      unsigned short proto);
 
 struct ip_vs_conn_param {
-	struct net			*net;
+	struct netns_ipvs		*ipvs;
 	const union nf_inet_addr	*caddr;
 	const union nf_inet_addr	*vaddr;
 	__be16				cport;
@@ -572,9 +522,7 @@
 	volatile __u32          flags;          /* status flags */
 	__u16                   protocol;       /* Which protocol (TCP/UDP) */
 	__u16			daf;		/* Address family of the dest */
-#ifdef CONFIG_NET_NS
-	struct net              *net;           /* Name space */
-#endif
+	struct netns_ipvs	*ipvs;
 
 	/* counter and timer */
 	atomic_t		refcnt;		/* reference count */
@@ -621,33 +569,6 @@
 	struct rcu_head		rcu_head;
 };
 
-/* To save some memory in conn table when name space is disabled. */
-static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
-{
-#ifdef CONFIG_NET_NS
-	return cp->net;
-#else
-	return &init_net;
-#endif
-}
-
-static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
-{
-#ifdef CONFIG_NET_NS
-	cp->net = net;
-#endif
-}
-
-static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
-				    struct net *net)
-{
-#ifdef CONFIG_NET_NS
-	return cp->net == net;
-#else
-	return 1;
-#endif
-}
-
 /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
  * for IPv6 support.
  *
@@ -707,7 +628,7 @@
 	unsigned int		flags;	  /* service status flags */
 	unsigned int		timeout;  /* persistent timeout in ticks */
 	__be32			netmask;  /* grouping granularity, mask/plen */
-	struct net		*net;
+	struct netns_ipvs	*ipvs;
 
 	struct list_head	destinations;  /* real server d-linked list */
 	__u32			num_dests;     /* number of servers */
@@ -1127,6 +1048,11 @@
 	return ipvs->sysctl_ignore_tunneled;
 }
 
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+	return ipvs->sysctl_cache_bypass;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1209,6 +1135,11 @@
 	return 0;
 }
 
+static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
+{
+	return 0;
+}
+
 #endif
 
 /* IPVS core functions
@@ -1230,14 +1161,14 @@
 	IP_VS_DIR_LAST,
 };
 
-static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
 					 const union nf_inet_addr *caddr,
 					 __be16 cport,
 					 const union nf_inet_addr *vaddr,
 					 __be16 vport,
 					 struct ip_vs_conn_param *p)
 {
-	p->net = net;
+	p->ipvs = ipvs;
 	p->af = af;
 	p->protocol = protocol;
 	p->caddr = caddr;
@@ -1251,12 +1182,14 @@
 struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
 
-struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
+					    const struct sk_buff *skb,
 					    const struct ip_vs_iphdr *iph);
 
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
-struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
+					     const struct sk_buff *skb,
 					     const struct ip_vs_iphdr *iph);
 
 /* Get reference to gain full access to conn.
@@ -1285,9 +1218,9 @@
 
 const char *ip_vs_state_name(__u16 proto, int state);
 
-void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
 int ip_vs_check_template(struct ip_vs_conn *ct);
-void ip_vs_random_dropentry(struct net *net);
+void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
 
@@ -1352,29 +1285,29 @@
 }
 
 /* IPVS netns init & cleanup functions */
-int ip_vs_estimator_net_init(struct net *net);
-int ip_vs_control_net_init(struct net *net);
-int ip_vs_protocol_net_init(struct net *net);
-int ip_vs_app_net_init(struct net *net);
-int ip_vs_conn_net_init(struct net *net);
-int ip_vs_sync_net_init(struct net *net);
-void ip_vs_conn_net_cleanup(struct net *net);
-void ip_vs_app_net_cleanup(struct net *net);
-void ip_vs_protocol_net_cleanup(struct net *net);
-void ip_vs_control_net_cleanup(struct net *net);
-void ip_vs_estimator_net_cleanup(struct net *net);
-void ip_vs_sync_net_cleanup(struct net *net);
-void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
+int ip_vs_control_net_init(struct netns_ipvs *ipvs);
+int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
+int ip_vs_app_net_init(struct netns_ipvs *ipvs);
+int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
+int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
+void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);
 
 /* IPVS application functions
  * (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
-void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 void ip_vs_unbind_app(struct ip_vs_conn *cp);
-int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 			   __u16 port);
 int ip_vs_app_inc_get(struct ip_vs_app *inc);
 void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -1439,10 +1372,10 @@
 extern int sysctl_ip_vs_sync_ver;
 
 struct ip_vs_service *
-ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport);
 
-bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 			    const union nf_inet_addr *daddr, __be16 dport);
 
 int ip_vs_use_count_inc(void);
@@ -1452,7 +1385,7 @@
 int ip_vs_control_init(void);
 void ip_vs_control_cleanup(void);
 struct ip_vs_dest *
-ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
+ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
 		const union nf_inet_addr *daddr, __be16 dport,
 		const union nf_inet_addr *vaddr, __be16 vport,
 		__u16 protocol, __u32 fwmark, __u32 flags);
@@ -1478,14 +1411,14 @@
 /* IPVS sync daemon data and function prototypes
  * (from ip_vs_sync.c)
  */
-int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
 		      int state);
-int stop_sync_thread(struct net *net, int state);
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
+int stop_sync_thread(struct netns_ipvs *ipvs, int state);
+void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);
 
 /* IPVS rate estimator prototypes (from ip_vs_est.c) */
-void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
 void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
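
Taken together, the ip_vs.h hunks above move the internal API from struct net
to struct netns_ipvs. A minimal sketch of a call site under the new
convention, assuming the net_ipvs() accessor and the ipvs->net back-pointer
from the existing netns_ipvs layout; example_drop() is hypothetical:

	/* Hypothetical caller, not part of this patch. */
	static void example_drop(struct net *net)
	{
		struct netns_ipvs *ipvs = net_ipvs(net);	/* convert once */

		ip_vs_random_dropentry(ipvs);	/* new signature takes ipvs */
		/* code that still needs a struct net can use ipvs->net */
	}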
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 3dde042..e1a10b0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -860,14 +860,13 @@
  *	skb processing functions
  */
 
-int ip6_output(struct sock *sk, struct sk_buff *skb);
+int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
 
-int __ip6_local_out(struct sk_buff *skb);
-int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb);
-int ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 
 /*
  *	Extension header (options) processing
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
new file mode 100644
index 0000000..774d85b
--- /dev/null
+++ b/include/net/l3mdev.h
@@ -0,0 +1,222 @@
+/*
+ * include/net/l3mdev.h - L3 master device API
+ * Copyright (c) 2015 Cumulus Networks
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _NET_L3MDEV_H_
+#define _NET_L3MDEV_H_
+
+/**
+ * struct l3mdev_ops - l3mdev operations
+ *
+ * @l3mdev_fib_table: Get FIB table id to use for lookups
+ *
+ * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ *
+ * @l3mdev_get_saddr: Get source address for a flow
+ *
+ * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
+ */
+
+struct l3mdev_ops {
+	u32		(*l3mdev_fib_table)(const struct net_device *dev);
+
+	/* IPv4 ops */
+	struct rtable *	(*l3mdev_get_rtable)(const struct net_device *dev,
+					     const struct flowi4 *fl4);
+	void		(*l3mdev_get_saddr)(struct net_device *dev,
+					    struct flowi4 *fl4);
+
+	/* IPv6 ops */
+	struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+						 const struct flowi6 *fl6);
+};
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+
+int l3mdev_master_ifindex_rcu(struct net_device *dev);
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+	int ifindex;
+
+	rcu_read_lock();
+	ifindex = l3mdev_master_ifindex_rcu(dev);
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
+/* get index of an interface to use for FIB lookups. For devices
+ * enslaved to an L3 master device, FIB lookups are based on the
+ * master index
+ */
+static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
+{
+	return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
+}
+
+static inline int l3mdev_fib_oif(struct net_device *dev)
+{
+	int oif;
+
+	rcu_read_lock();
+	oif = l3mdev_fib_oif_rcu(dev);
+	rcu_read_unlock();
+
+	return oif;
+}
+
+u32 l3mdev_fib_table_rcu(const struct net_device *dev);
+u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+	u32 tb_id;
+
+	rcu_read_lock();
+	tb_id = l3mdev_fib_table_rcu(dev);
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
+					       const struct flowi4 *fl4)
+{
+	if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
+		return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
+
+	return NULL;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	bool rc = false;
+
+	if (ifindex == 0)
+		return false;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		rc = netif_is_l3_master(dev);
+
+	rcu_read_unlock();
+
+	return rc;
+}
+
+static inline void l3mdev_get_saddr(struct net *net, int ifindex,
+				    struct flowi4 *fl4)
+{
+	struct net_device *dev;
+
+	if (ifindex) {
+
+		rcu_read_lock();
+
+		dev = dev_get_by_index_rcu(net, ifindex);
+		if (dev && netif_is_l3_master(dev) &&
+		    dev->l3mdev_ops->l3mdev_get_saddr) {
+			dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
+		}
+
+		rcu_read_unlock();
+	}
+}
+
+static inline struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
+						   const struct flowi6 *fl6)
+{
+	if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rt6_dst)
+		return dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+
+	return NULL;
+}
+
+static inline
+struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
+					const struct flowi6 *fl6)
+{
+	struct dst_entry *dst = NULL;
+	struct net_device *dev;
+
+	dev = dev_get_by_index(net, fl6->flowi6_oif);
+	if (dev) {
+		dst = l3mdev_get_rt6_dst(dev, fl6);
+		dev_put(dev);
+	}
+
+	return dst;
+}
+
+#else
+
+static inline int l3mdev_master_ifindex_rcu(struct net_device *dev)
+{
+	return 0;
+}
+static inline int l3mdev_master_ifindex(struct net_device *dev)
+{
+	return 0;
+}
+
+static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
+{
+	return dev ? dev->ifindex : 0;
+}
+static inline int l3mdev_fib_oif(struct net_device *dev)
+{
+	return dev ? dev->ifindex : 0;
+}
+
+static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+static inline u32 l3mdev_fib_table(const struct net_device *dev)
+{
+	return 0;
+}
+static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
+{
+	return 0;
+}
+
+static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
+					       const struct flowi4 *fl4)
+{
+	return NULL;
+}
+
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+	return false;
+}
+
+static inline void l3mdev_get_saddr(struct net *net, int ifindex,
+				    struct flowi4 *fl4)
+{
+}
+
+static inline
+struct dst_entry *l3mdev_get_rt6_dst(const struct net_device *dev,
+				     const struct flowi6 *fl6)
+{
+	return NULL;
+}
+static inline
+struct dst_entry *l3mdev_rt6_dst_by_oif(struct net *net,
+					const struct flowi6 *fl6)
+{
+	return NULL;
+}
+#endif
+
+#endif /* _NET_L3MDEV_H_ */
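
To make the new API concrete, a sketch of how a hypothetical L3 master driver
could wire up these ops. The my_vrf_* names are invented, and the
dev->l3mdev_ops field, netif_is_l3_master() and IFF_L3MDEV_MASTER are assumed
from the surrounding series:

	/* Hypothetical driver glue; the rtable/saddr/rt6_dst ops are
	 * optional, the inline helpers above check them for NULL.
	 */
	static u32 my_vrf_fib_table(const struct net_device *dev)
	{
		return 42;	/* FIB table owned by this master device */
	}

	static const struct l3mdev_ops my_vrf_l3mdev_ops = {
		.l3mdev_fib_table = my_vrf_fib_table,
	};

	/* In the driver's setup path (assumed from the series):
	 *	dev->priv_flags |= IFF_L3MDEV_MASTER;
	 *	dev->l3mdev_ops = &my_vrf_l3mdev_ops;
	 */
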
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index fce0e35..66350ce 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -18,7 +18,7 @@
 	__u16		type;
 	__u16		flags;
 	atomic_t	refcnt;
-	int		(*orig_output)(struct sock *sk, struct sk_buff *skb);
+	int		(*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int		(*orig_input)(struct sk_buff *);
 	int             len;
 	__u8            data[0];
@@ -28,7 +28,7 @@
 	int (*build_state)(struct net_device *dev, struct nlattr *encap,
 			   unsigned int family, const void *cfg,
 			   struct lwtunnel_state **ts);
-	int (*output)(struct sock *sk, struct sk_buff *skb);
+	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int (*input)(struct sk_buff *skb);
 	int (*fill_encap)(struct sk_buff *skb,
 			  struct lwtunnel_state *lwtstate);
@@ -88,7 +88,7 @@
 int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
 struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
 int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int lwtunnel_input(struct sk_buff *skb);
 
 #else
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	return -EOPNOTSUPP;
 }
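
The net argument threads through lwtunnel as well: both the registered output
op and the saved orig_output now take it. A hedged sketch of an encap output
under the new prototype (mytunnel_output() is hypothetical):

	/* Hypothetical encap output matching the new prototype. */
	static int mytunnel_output(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
	{
		struct lwtunnel_state *lwtstate = skb_dst(skb)->lwtstate;

		/* ... push the encapsulation header onto skb ... */
		return lwtstate->orig_output(net, sk, skb);	/* resume output */
	}
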
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index bfc5694..4b9dd07 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,6 +5,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1240,11 +1241,6 @@
  * @flags: configuration flags defined above
  *
  * @listen_interval: listen interval in units of beacon interval
- * @max_sleep_period: the maximum number of beacon intervals to sleep for
- *	before checking the beacon for a TIM bit (managed mode only); this
- *	value will be only achievable between DTIM frames, the hardware
- *	needs to check for the multicast traffic bit in DTIM beacons.
- *	This variable is valid only when the CONF_PS flag is set.
  * @ps_dtim_period: The DTIM period of the AP we're connected to, for use
  *	in power saving. Power saving will not be enabled until a beacon
  *	has been received and the DTIM period is known.
@@ -1274,7 +1270,6 @@
 struct ieee80211_conf {
 	u32 flags;
 	int power_level, dynamic_ps_timeout;
-	int max_sleep_period;
 
 	u16 listen_interval;
 	u8 ps_dtim_period;
@@ -1360,6 +1355,8 @@
  * @debugfs_dir: debugfs dentry, can be used by drivers to create own per
  *	interface debug files. Note that it will be NULL for the virtual
  *	monitor interface (if that is requested.)
+ * @probe_req_reg: probe requests should be reported to mac80211 for this
+ *	interface.
  * @drv_priv: data area for driver use, will always be aligned to
  *	sizeof(void *).
  * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
@@ -1384,6 +1381,8 @@
 	struct dentry *debugfs_dir;
 #endif
 
+	unsigned int probe_req_reg;
+
 	/* must be last */
 	u8 drv_priv[0] __aligned(sizeof(void *));
 };
@@ -1494,10 +1493,8 @@
  * 	- Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
- * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
-	void *drv_priv;
 	atomic64_t tx_pn;
 	u32 cipher;
 	u8 icv_len;
@@ -1680,6 +1677,7 @@
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *	valid if the STA is a TDLS peer in the first place.
+ * @mfp: indicates whether the STA uses management frame protection or not.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1697,6 +1695,7 @@
 	struct ieee80211_sta_rates __rcu *rates;
 	bool tdls;
 	bool tdls_initiator;
+	bool mfp;
 
 	struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1894,6 +1893,12 @@
  * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
 *	than the BSS bandwidth for a TDLS link on the base channel.
  *
+ * @IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU: The driver supports receiving A-MSDUs
+ *	within A-MPDU.
+ *
+ * @IEEE80211_HW_BEACON_TX_STATUS: The device/driver provides TX status
+ *	for sent beacons.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1927,6 +1932,8 @@
 	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
 	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
 	IEEE80211_HW_TDLS_WIDER_BW,
+	IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
+	IEEE80211_HW_BEACON_TX_STATUS,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
@@ -2827,6 +2834,13 @@
  *	See the section "Frame filtering" for more information.
  *	This callback must be implemented and can sleep.
  *
+ * @config_iface_filter: Configure the interface's RX filter.
+ *	This callback is optional and is used to configure which frames
+ *	should be passed to mac80211. The filter_flags is the combination
+ *	of FIF_* flags. The changed_flags is a bit mask that indicates
+ *	which flags are changed.
+ *	This callback can sleep.
+ *
  * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
  * 	must be set or cleared for a given STA. Must be atomic.
  *
@@ -3016,6 +3030,9 @@
  *	buffer size of 8. Correct ways to retransmit #1 would be:
  *	 - TX:       1 or 18 or 81
  *	Even "189" would be wrong since 1 could be lost again.
+ *	The @amsdu parameter is valid when the action is set to
+ *	%IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
+ *	to receive A-MSDU within A-MPDU.
  *
  *	Returns a negative error code on failure.
  *	The callback can sleep.
@@ -3266,6 +3283,10 @@
 				 unsigned int changed_flags,
 				 unsigned int *total_flags,
 				 u64 multicast);
+	void (*config_iface_filter)(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    unsigned int filter_flags,
+				    unsigned int changed_flags);
 	int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 		       bool set);
 	int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -3349,7 +3370,7 @@
 			    struct ieee80211_vif *vif,
 			    enum ieee80211_ampdu_mlme_action action,
 			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-			    u8 buf_size);
+			    u8 buf_size, bool amsdu);
 	int (*get_survey)(struct ieee80211_hw *hw, int idx,
 		struct survey_info *survey);
 	void (*rfkill_poll)(struct ieee80211_hw *hw);
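
For the widened ampdu_action() prototype, a hedged driver-side sketch
(my_ampdu_action() is hypothetical); as documented above, the amsdu flag is
only meaningful for IEEE80211_AMPDU_TX_OPERATIONAL:

	/* Hypothetical driver callback under the widened prototype. */
	static int my_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid,
				   u16 *ssn, u8 buf_size, bool amsdu)
	{
		if (action == IEEE80211_AMPDU_TX_OPERATIONAL)
			pr_debug("peer %pM tid %u: A-MSDU in A-MPDU %sallowed\n",
				 sta->addr, tid, amsdu ? "" : "not ");
		return -EOPNOTSUPP;	/* sketch only, no real aggregation */
	}
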
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 32bd7c0..da574bb 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -23,14 +23,6 @@
 
 #include <net/cfg802154.h>
 
-/* General MAC frame format:
- *  2 bytes: Frame Control
- *  1 byte:  Sequence Number
- * 20 bytes: Addressing fields
- * 14 bytes: Auxiliary Security Header
- */
-#define MAC802154_FRAME_HARD_HEADER_LEN		(2 + 1 + 20 + 14)
-
 /**
  * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags
  *
@@ -256,7 +248,7 @@
 static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
 {
 	/* return some invalid fc on failure */
-	if (unlikely(skb->mac_len < 2)) {
+	if (unlikely(skb->len < 2)) {
 		WARN_ON(1);
 		return cpu_to_le16(0);
 	}
@@ -285,6 +277,16 @@
 }
 
 /**
+ * ieee802154_le16_to_be16 - copies and convert le16 to be16
+ * @be16_dst: be16 destination pointer
+ * @le16_src: le16 source pointer
+ */
+static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
+{
+	__put_unaligned_memmove16(swab16p(le16_src), be16_dst);
+}
+
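
A small usage illustration for the new byte-order helper (example_swap() and
its buffers are placeholders):

	/* Illustrative only: 0x1234 stored little-endian in a frame. */
	static void example_swap(void)
	{
		u8 le_src[2] = { 0x34, 0x12 };
		u8 be_dst[2];

		ieee802154_le16_to_be16(be_dst, le_src);
		/* be_dst now holds { 0x12, 0x34 } */
	}
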
+/**
  * ieee802154_alloc_hw - Allocate a new hardware device
  *
  * This must be called once for each hardware device. The returned pointer
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 4757997..179253f 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -18,7 +18,7 @@
 
 struct mpls_iptunnel_encap {
 	u32	label[MAX_NEW_LABELS];
-	u32	labels;
+	u8	labels;
 };
 
 static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index c93c75f..e8d1448 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -45,12 +45,12 @@
 void br_netfilter_enable(void);
 
 #if IS_ENABLED(CONFIG_IPV6)
-int br_validate_ipv6(struct sk_buff *skb);
+int br_validate_ipv6(struct net *net, struct sk_buff *skb);
 unsigned int br_nf_pre_routing_ipv6(void *priv,
 				    struct sk_buff *skb,
 				    const struct nf_hook_state *state);
 #else
-static inline int br_validate_ipv6(struct sk_buff *skb)
+static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 {
 	return -1;
 }
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index 77862c3..df7ecd8 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -6,7 +6,7 @@
 #include <net/icmp.h>
 
 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
-void nf_send_reset(struct sk_buff *oldskb, int hook);
+void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook);
 
 const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
 					     struct tcphdr *_oth, int hook);
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
index 27666d8..fb7da5b 100644
--- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
@@ -5,7 +5,7 @@
 
 int nf_ct_frag6_init(void);
 void nf_ct_frag6_cleanup(void);
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user);
 void nf_ct_frag6_consume_orig(struct sk_buff *skb);
 
 struct inet_frags_ctl;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d642f68..fde4068 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -183,10 +183,6 @@
 
 void nf_ct_free_hashtable(void *hash, unsigned int size);
 
-struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, u16 zone,
-		    const struct nf_conntrack_tuple *tuple);
-
 int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 6230871..f72be38 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -20,10 +20,20 @@
 };
 
 struct nf_conn_timeout {
-	struct ctnl_timeout	*timeout;
+	struct ctnl_timeout __rcu *timeout;
 };
 
-#define NF_CT_TIMEOUT_EXT_DATA(__t) (unsigned int *) &((__t)->timeout->data)
+static inline unsigned int *
+nf_ct_timeout_data(struct nf_conn_timeout *t)
+{
+	struct ctnl_timeout *timeout;
+
+	timeout = rcu_dereference(t->timeout);
+	if (timeout == NULL)
+		return NULL;
+
+	return (unsigned int *)timeout->data;
+}
 
 static inline
 struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
@@ -47,7 +57,7 @@
 	if (timeout_ext == NULL)
 		return NULL;
 
-	timeout_ext->timeout = timeout;
+	rcu_assign_pointer(timeout_ext->timeout, timeout);
 
 	return timeout_ext;
 #else
@@ -64,10 +74,13 @@
 	unsigned int *timeouts;
 
 	timeout_ext = nf_ct_timeout_find(ct);
-	if (timeout_ext)
-		timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-	else
+	if (timeout_ext) {
+		timeouts = nf_ct_timeout_data(timeout_ext);
+		if (unlikely(!timeouts))
+			timeouts = l4proto->get_timeouts(net);
+	} else {
 		timeouts = l4proto->get_timeouts(net);
+	}
 
 	return timeouts;
 #else
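
Since ->timeout is now annotated __rcu, the rcu_dereference() inside
nf_ct_timeout_data() is only valid under RCU protection. A hedged reader
sketch (example_dump_timeout() is hypothetical; packet-path callers such as
nf_ct_timeout_lookup() already run in an RCU read-side section):

	/* Illustrative reader; the returned array must not be used
	 * after rcu_read_unlock().
	 */
	static void example_dump_timeout(const struct nf_conn *ct)
	{
		struct nf_conn_timeout *ext;
		unsigned int *timeouts;

		rcu_read_lock();
		ext = nf_ct_timeout_find(ct);
		timeouts = ext ? nf_ct_timeout_data(ext) : NULL;
		if (timeouts)
			pr_debug("first timeout: %u\n", timeouts[0]);
		rcu_read_unlock();
	}
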
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index e863585..9c5638a 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -32,7 +32,7 @@
 void nf_unregister_queue_handler(void);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
-bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
 
 static inline void init_hashrandom(u32 *jhash_initval)
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
deleted file mode 100644
index aff88ba..0000000
--- a/include/net/netfilter/nfnetlink_queue.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef _NET_NFNL_QUEUE_H_
-#define _NET_NFNL_QUEUE_H_
-
-#include <linux/netfilter/nf_conntrack_common.h>
-
-struct nf_conn;
-
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
-struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
-			     enum ip_conntrack_info *ctinfo);
-struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
-			       const struct nlattr *attr,
-			       enum ip_conntrack_info *ctinfo);
-int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
-		 enum ip_conntrack_info ctinfo);
-void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-			 enum ip_conntrack_info ctinfo, int diff);
-int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-			u32 portid, u32 report);
-#else
-inline struct nf_conn *
-nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
-{
-	return NULL;
-}
-
-inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
-				      const struct nlattr *attr,
-				      enum ip_conntrack_info *ctinfo)
-{
-	return NULL;
-}
-
-inline int
-nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
-	return 0;
-}
-
-inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-				enum ip_conntrack_info ctinfo, int diff)
-{
-}
-
-inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-			       u32 portid, u32 report)
-{
-	return 0;
-}
-#endif /* NF_CONNTRACK */
-#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 2a5dbcc..0e31727 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1004,6 +1004,15 @@
 }
 
 /**
+ * nla_get_le32 - return payload of __le32 attribute
+ * @nla: __le32 netlink attribute
+ */
+static inline __le32 nla_get_le32(const struct nlattr *nla)
+{
+	return *(__le32 *) nla_data(nla);
+}
+
+/**
  * nla_get_u16 - return payload of u16 attribute
  * @nla: u16 netlink attribute
  */
@@ -1066,6 +1075,15 @@
 }
 
 /**
+ * nla_get_le64 - return payload of __le64 attribute
+ * @nla: __le64 netlink attribute
+ */
+static inline __le64 nla_get_le64(const struct nlattr *nla)
+{
+	return *(__le64 *) nla_data(nla);
+}
+
+/**
  * nla_get_s32 - return payload of s32 attribute
  * @nla: s32 netlink attribute
  */
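
The new accessors return the raw little-endian payload, so callers convert
explicitly. A hedged parsing sketch (the attribute slot is a placeholder):

	/* Illustrative: the attribute index is an assumption. */
	static void example_parse(struct nlattr *tb[])
	{
		if (tb[1]) {
			__le64 cookie = nla_get_le64(tb[1]);

			pr_debug("cookie: %llx\n",
				 (unsigned long long)le64_to_cpu(cookie));
		}
	}
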
diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h
index 75d2e18..707e3ab 100644
--- a/include/net/nfc/nci.h
+++ b/include/net/nfc/nci.h
@@ -35,6 +35,7 @@
 #define NCI_MAX_NUM_RF_CONFIGS					10
 #define NCI_MAX_NUM_CONN					10
 #define NCI_MAX_PARAM_LEN					251
+#define NCI_MAX_PAYLOAD_SIZE					255
 #define NCI_MAX_PACKET_SIZE					258
 
 /* NCI Status Codes */
@@ -315,6 +316,8 @@
 	__u8	nfcee_mode;
 } __packed;
 
+#define NCI_OP_CORE_GET_CONFIG_CMD	nci_opcode_pack(NCI_GID_CORE, 0x03)
+
 /* ----------------------- */
 /* ---- NCI Responses ---- */
 /* ----------------------- */
@@ -375,6 +378,9 @@
 } __packed;
 
 #define NCI_OP_NFCEE_MODE_SET_RSP nci_opcode_pack(NCI_GID_NFCEE_MGMT, 0x01)
+
+#define NCI_OP_CORE_GET_CONFIG_RSP	nci_opcode_pack(NCI_GID_CORE, 0x03)
+
 /* --------------------------- */
 /* ---- NCI Notifications ---- */
 /* --------------------------- */
@@ -528,4 +534,6 @@
 	struct nci_nfcee_information_tlv	information_tlv;
 } __packed;
 
+#define NCI_OP_CORE_RESET_NTF		nci_opcode_pack(NCI_GID_CORE, 0x00)
+
 #endif /* __NCI_H */
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index d0d0f1e..57ce24f 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -67,7 +67,7 @@
 
 struct nci_dev;
 
-struct nci_prop_ops {
+struct nci_driver_ops {
 	__u16 opcode;
 	int (*rsp)(struct nci_dev *dev, struct sk_buff *skb);
 	int (*ntf)(struct nci_dev *dev, struct sk_buff *skb);
@@ -94,8 +94,11 @@
 	void  (*hci_cmd_received)(struct nci_dev *ndev, u8 pipe, u8 cmd,
 				  struct sk_buff *skb);
 
-	struct nci_prop_ops *prop_ops;
+	struct nci_driver_ops *prop_ops;
 	size_t n_prop_ops;
+
+	struct nci_driver_ops *core_ops;
+	size_t n_core_ops;
 };
 
 #define NCI_MAX_SUPPORTED_RF_INTERFACES		4
@@ -125,6 +128,8 @@
 
 /* Gates */
 #define NCI_HCI_ADMIN_GATE         0x00
+#define NCI_HCI_LOOPBACK_GATE	   0x04
+#define NCI_HCI_IDENTITY_MGMT_GATE 0x05
 #define NCI_HCI_LINK_MGMT_GATE     0x06
 
 /* Pipes */
@@ -278,10 +283,12 @@
 			    unsigned long opt),
 		unsigned long opt, __u32 timeout);
 int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload);
 int nci_core_reset(struct nci_dev *ndev);
 int nci_core_init(struct nci_dev *ndev);
 
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
 
 int nci_nfcee_discover(struct nci_dev *ndev, u8 action);
@@ -305,6 +312,7 @@
 		      const u8 *param, size_t param_len);
 int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
 		      struct sk_buff **skb);
+int nci_hci_clear_all_pipes(struct nci_dev *ndev);
 int nci_hci_dev_session_init(struct nci_dev *ndev);
 
 static inline struct sk_buff *nci_skb_alloc(struct nci_dev *ndev,
@@ -348,9 +356,14 @@
 			struct sk_buff *skb);
 int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
 			struct sk_buff *skb);
+int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb);
+int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb);
 void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload);
 int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb);
+int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id);
 void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
 				__u8 conn_id, int err);
 void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err);
@@ -365,6 +378,7 @@
 void nci_req_complete(struct nci_dev *ndev, int result);
 struct nci_conn_info *nci_get_conn_info_by_conn_id(struct nci_dev *ndev,
 						   int conn_id);
+int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id);
 
 /* ----- NCI status code ----- */
 int nci_to_errno(__u8 code);
@@ -380,6 +394,12 @@
 
 	unsigned int		xfer_udelay;	/* microseconds delay between
 						  transactions */
+
+	unsigned int		xfer_speed_hz; /*
+						* SPI clock frequency
+						* 0 => default clock
+						*/
+
 	u8			acknowledge_mode;
 
 	struct completion	req_completion;
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 30afc9a..dcfcfc9 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -68,7 +68,7 @@
 	int (*activate_target)(struct nfc_dev *dev, struct nfc_target *target,
 			       u32 protocol);
 	void (*deactivate_target)(struct nfc_dev *dev,
-				  struct nfc_target *target);
+				  struct nfc_target *target, u8 mode);
 	int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
 			     struct sk_buff *skb, data_exchange_cb_t cb,
 			     void *cb_context);
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index cf2713d..32cb3e5 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -56,6 +56,22 @@
 
 	/* add new commands above here */
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	NL802154_CMD_SET_SEC_PARAMS,
+	NL802154_CMD_GET_SEC_KEY,		/* can dump */
+	NL802154_CMD_NEW_SEC_KEY,
+	NL802154_CMD_DEL_SEC_KEY,
+	NL802154_CMD_GET_SEC_DEV,		/* can dump */
+	NL802154_CMD_NEW_SEC_DEV,
+	NL802154_CMD_DEL_SEC_DEV,
+	NL802154_CMD_GET_SEC_DEVKEY,		/* can dump */
+	NL802154_CMD_NEW_SEC_DEVKEY,
+	NL802154_CMD_DEL_SEC_DEVKEY,
+	NL802154_CMD_GET_SEC_LEVEL,		/* can dump */
+	NL802154_CMD_NEW_SEC_LEVEL,
+	NL802154_CMD_DEL_SEC_LEVEL,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	/* used to define NL802154_CMD_MAX below */
 	__NL802154_CMD_AFTER_LAST,
 	NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1
@@ -110,6 +126,18 @@
 
 	/* add attributes here, update the policy in nl802154.c */
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	NL802154_ATTR_SEC_ENABLED,
+	NL802154_ATTR_SEC_OUT_LEVEL,
+	NL802154_ATTR_SEC_OUT_KEY_ID,
+	NL802154_ATTR_SEC_FRAME_COUNTER,
+
+	NL802154_ATTR_SEC_LEVEL,
+	NL802154_ATTR_SEC_DEVICE,
+	NL802154_ATTR_SEC_DEVKEY,
+	NL802154_ATTR_SEC_KEY,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	__NL802154_ATTR_AFTER_LAST,
 	NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1
 };
@@ -247,4 +275,167 @@
 	NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
 };
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+
+enum nl802154_dev_addr_modes {
+	NL802154_DEV_ADDR_NONE,
+	__NL802154_DEV_ADDR_INVALID,
+	NL802154_DEV_ADDR_SHORT,
+	NL802154_DEV_ADDR_EXTENDED,
+
+	/* keep last */
+	__NL802154_DEV_ADDR_AFTER_LAST,
+	NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1
+};
+
+enum nl802154_dev_addr_attrs {
+	NL802154_DEV_ADDR_ATTR_UNSPEC,
+
+	NL802154_DEV_ADDR_ATTR_PAN_ID,
+	NL802154_DEV_ADDR_ATTR_MODE,
+	NL802154_DEV_ADDR_ATTR_SHORT,
+	NL802154_DEV_ADDR_ATTR_EXTENDED,
+
+	/* keep last */
+	__NL802154_DEV_ADDR_ATTR_AFTER_LAST,
+	NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_modes {
+	NL802154_KEY_ID_MODE_IMPLICIT,
+	NL802154_KEY_ID_MODE_INDEX,
+	NL802154_KEY_ID_MODE_INDEX_SHORT,
+	NL802154_KEY_ID_MODE_INDEX_EXTENDED,
+
+	/* keep last */
+	__NL802154_KEY_ID_MODE_AFTER_LAST,
+	NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1
+};
+
+enum nl802154_key_id_attrs {
+	NL802154_KEY_ID_ATTR_UNSPEC,
+
+	NL802154_KEY_ID_ATTR_MODE,
+	NL802154_KEY_ID_ATTR_INDEX,
+	NL802154_KEY_ID_ATTR_IMPLICIT,
+	NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+	NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+
+	/* keep last */
+	__NL802154_KEY_ID_ATTR_AFTER_LAST,
+	NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_seclevels {
+	NL802154_SECLEVEL_NONE,
+	NL802154_SECLEVEL_MIC32,
+	NL802154_SECLEVEL_MIC64,
+	NL802154_SECLEVEL_MIC128,
+	NL802154_SECLEVEL_ENC,
+	NL802154_SECLEVEL_ENC_MIC32,
+	NL802154_SECLEVEL_ENC_MIC64,
+	NL802154_SECLEVEL_ENC_MIC128,
+
+	/* keep last */
+	__NL802154_SECLEVEL_AFTER_LAST,
+	NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1
+};
+
+enum nl802154_frames {
+	NL802154_FRAME_BEACON,
+	NL802154_FRAME_DATA,
+	NL802154_FRAME_ACK,
+	NL802154_FRAME_CMD,
+
+	/* keep last */
+	__NL802154_FRAME_AFTER_LAST,
+	NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_cmd_frames {
+	__NL802154_CMD_FRAME_INVALID,
+	NL802154_CMD_FRAME_ASSOC_REQUEST,
+	NL802154_CMD_FRAME_ASSOC_RESPONSE,
+	NL802154_CMD_FRAME_DISASSOC_NOTIFY,
+	NL802154_CMD_FRAME_DATA_REQUEST,
+	NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY,
+	NL802154_CMD_FRAME_ORPHAN_NOTIFY,
+	NL802154_CMD_FRAME_BEACON_REQUEST,
+	NL802154_CMD_FRAME_COORD_REALIGNMENT,
+	NL802154_CMD_FRAME_GTS_REQUEST,
+
+	/* keep last */
+	__NL802154_CMD_FRAME_AFTER_LAST,
+	NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1
+};
+
+enum nl802154_seclevel_attrs {
+	NL802154_SECLEVEL_ATTR_UNSPEC,
+
+	NL802154_SECLEVEL_ATTR_LEVELS,
+	NL802154_SECLEVEL_ATTR_FRAME,
+	NL802154_SECLEVEL_ATTR_CMD_FRAME,
+	NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+
+	/* keep last */
+	__NL802154_SECLEVEL_ATTR_AFTER_LAST,
+	NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1
+};
+
+/* TODO what is this? couldn't find in mib */
+enum {
+	NL802154_DEVKEY_IGNORE,
+	NL802154_DEVKEY_RESTRICT,
+	NL802154_DEVKEY_RECORD,
+
+	/* keep last */
+	__NL802154_DEVKEY_AFTER_LAST,
+	NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1
+};
+
+enum nl802154_dev {
+	NL802154_DEV_ATTR_UNSPEC,
+
+	NL802154_DEV_ATTR_FRAME_COUNTER,
+	NL802154_DEV_ATTR_PAN_ID,
+	NL802154_DEV_ATTR_SHORT_ADDR,
+	NL802154_DEV_ATTR_EXTENDED_ADDR,
+	NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+	NL802154_DEV_ATTR_KEY_MODE,
+
+	/* keep last */
+	__NL802154_DEV_ATTR_AFTER_LAST,
+	NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_devkey {
+	NL802154_DEVKEY_ATTR_UNSPEC,
+
+	NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+	NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+	NL802154_DEVKEY_ATTR_ID,
+
+	/* keep last */
+	__NL802154_DEVKEY_ATTR_AFTER_LAST,
+	NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1
+};
+
+enum nl802154_key {
+	NL802154_KEY_ATTR_UNSPEC,
+
+	NL802154_KEY_ATTR_ID,
+	NL802154_KEY_ATTR_USAGE_FRAMES,
+	NL802154_KEY_ATTR_USAGE_CMDS,
+	NL802154_KEY_ATTR_BYTES,
+
+	/* keep last */
+	__NL802154_KEY_ATTR_AFTER_LAST,
+	NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1
+};
+
+#define NL802154_KEY_SIZE		16
+#define NL802154_CMD_FRAME_NR_IDS	256
+
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #endif /* __NL802154_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 181f97f..a0dde04 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -34,9 +34,9 @@
 	char		*slab_name;
 	int		(*rtx_syn_ack)(const struct sock *sk,
 				       struct request_sock *req);
-	void		(*send_ack)(struct sock *sk, struct sk_buff *skb,
+	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
 				    struct request_sock *req);
-	void		(*send_reset)(struct sock *sk,
+	void		(*send_reset)(const struct sock *sk,
 				      struct sk_buff *skb);
 	void		(*destructor)(struct request_sock *req);
 	void		(*syn_ack_timeout)(const struct request_sock *req);
@@ -50,16 +50,15 @@
 	struct sock_common		__req_common;
 #define rsk_refcnt			__req_common.skc_refcnt
 #define rsk_hash			__req_common.skc_hash
+#define rsk_listener			__req_common.skc_listener
+#define rsk_window_clamp		__req_common.skc_window_clamp
+#define rsk_rcv_wnd			__req_common.skc_rcv_wnd
 
 	struct request_sock		*dl_next;
-	struct sock			*rsk_listener;
 	u16				mss;
 	u8				num_retrans; /* number of retransmits */
 	u8				cookie_ts:1; /* syncookie: encode tcpopts in timestamp */
 	u8				num_timeout:7; /* number of timeouts */
-	/* The following two fields can be easily recomputed I think -AK */
-	u32				window_clamp; /* window clamp at creation time */
-	u32				rcv_wnd;	  /* rcv_wnd offered first time */
 	u32				ts_recent;
 	struct timer_list		rsk_timer;
 	const struct request_sock_ops	*rsk_ops;
@@ -69,24 +68,6 @@
 	u32				peer_secid;
 };
 
-static inline struct request_sock *
-reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
-{
-	struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
-
-	if (req) {
-		req->rsk_ops = ops;
-		sock_hold(sk_listener);
-		req->rsk_listener = sk_listener;
-		req->saved_syn = NULL;
-		/* Following is temporary. It is coupled with debugging
-		 * helpers in reqsk_put() & reqsk_free()
-		 */
-		atomic_set(&req->rsk_refcnt, 0);
-	}
-	return req;
-}
-
 static inline struct request_sock *inet_reqsk(struct sock *sk)
 {
 	return (struct request_sock *)sk;
@@ -97,6 +78,34 @@
 	return (struct sock *)req;
 }
 
+static inline struct request_sock *
+reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
+	    bool attach_listener)
+{
+	struct request_sock *req;
+
+	req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
+
+	if (req) {
+		req->rsk_ops = ops;
+		if (attach_listener) {
+			sock_hold(sk_listener);
+			req->rsk_listener = sk_listener;
+		} else {
+			req->rsk_listener = NULL;
+		}
+		req_to_sk(req)->sk_prot = sk_listener->sk_prot;
+		sk_node_init(&req_to_sk(req)->sk_node);
+		sk_tx_queue_clear(req_to_sk(req));
+		req->saved_syn = NULL;
+		/* Following is temporary. It is coupled with debugging
+		 * helpers in reqsk_put() & reqsk_free()
+		 */
+		atomic_set(&req->rsk_refcnt, 0);
+	}
+	return req;
+}
+
 static inline void reqsk_free(struct request_sock *req)
 {
 	/* temporary debugging */
@@ -117,26 +126,6 @@
 
 extern int sysctl_max_syn_backlog;
 
-/** struct listen_sock - listen state
- *
- * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
- */
-struct listen_sock {
-	int			qlen_inc; /* protected by listener lock */
-	int			young_inc;/* protected by listener lock */
-
-	/* following fields can be updated by timer */
-	atomic_t		qlen_dec; /* qlen = qlen_inc - qlen_dec */
-	atomic_t		young_dec;
-
-	u8			max_qlen_log ____cacheline_aligned_in_smp;
-	u8			synflood_warned;
-	/* 2 bytes hole, try to use */
-	u32			hash_rnd;
-	u32			nr_table_entries;
-	struct request_sock	*syn_table[0];
-};
-
 /*
  * For a TCP Fast Open listener -
  *	lock - protects the access to all the reqsk, which is co-owned by
@@ -170,127 +159,72 @@
  * @rskq_accept_head - FIFO head of established children
  * @rskq_accept_tail - FIFO tail of established children
  * @rskq_defer_accept - User waits for some data after accept()
- * @syn_wait_lock - serializer
- *
- * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
- * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
  */
 struct request_sock_queue {
+	spinlock_t		rskq_lock;
+	u8			rskq_defer_accept;
+
+	u32			synflood_warned;
+	atomic_t		qlen;
+	atomic_t		young;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
-	u8			rskq_defer_accept;
-	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
-
-	/* temporary alignment, our goal is to get rid of this lock */
-	spinlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
-int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      unsigned int nr_table_entries);
+void reqsk_queue_alloc(struct request_sock_queue *queue);
 
-void __reqsk_queue_destroy(struct request_sock_queue *queue);
-void reqsk_queue_destroy(struct request_sock_queue *queue);
 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 			   bool reset);
 
-static inline struct request_sock *
-	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	queue->rskq_accept_head = NULL;
-	return req;
-}
-
-static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
 	return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-				   struct request_sock *req,
-				   struct sock *parent,
-				   struct sock *child)
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+						      struct sock *parent)
 {
-	req->sk = child;
-	sk_acceptq_added(parent);
+	struct request_sock *req;
 
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_head = req;
-	else
-		queue->rskq_accept_tail->dl_next = req;
-
-	queue->rskq_accept_tail = req;
-	req->dl_next = NULL;
-}
-
-static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
-{
-	struct request_sock *req = queue->rskq_accept_head;
-
-	WARN_ON(req == NULL);
-
-	queue->rskq_accept_head = req->dl_next;
-	if (queue->rskq_accept_head == NULL)
-		queue->rskq_accept_tail = NULL;
-
+	spin_lock_bh(&queue->rskq_lock);
+	req = queue->rskq_accept_head;
+	if (req) {
+		sk_acceptq_removed(parent);
+		queue->rskq_accept_head = req->dl_next;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_tail = NULL;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
 	return req;
 }
 
 static inline void reqsk_queue_removed(struct request_sock_queue *queue,
 				       const struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	if (req->num_timeout == 0)
-		atomic_inc(&lopt->young_dec);
-	atomic_inc(&lopt->qlen_dec);
+		atomic_dec(&queue->young);
+	atomic_dec(&queue->qlen);
 }
 
 static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
-	lopt->young_inc++;
-	lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
-	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
-	return lopt->young_inc - atomic_read(&lopt->young_dec);
+	atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
 }
 
 static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	const struct listen_sock *lopt = queue->listen_opt;
-
-	return lopt ? listen_sock_qlen(lopt) : 0;
+	return atomic_read(&queue->qlen);
 }
 
 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	return listen_sock_young(queue->listen_opt);
+	return atomic_read(&queue->young);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
-{
-	return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
-}
-
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
-			  u32 hash, struct request_sock *req,
-			  unsigned long timeout);
-
 #endif /* _REQUEST_SOCK_H */
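
A hedged sketch of the accept side after the listen_sock removal:
reqsk_queue_remove() now takes the per-queue rskq_lock and folds in the
sk_acceptq_removed() accounting itself (example_accept() is hypothetical;
real callers also drop the request with reqsk_put()):

	/* Illustrative consumer of the simplified queue API. */
	static struct sock *example_accept(struct sock *parent,
					   struct request_sock_queue *queue)
	{
		struct request_sock *req = reqsk_queue_remove(queue, parent);

		if (!req)
			return NULL;	/* queue was empty */
		return req->sk;		/* the established child socket */
	}
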
diff --git a/include/net/route.h b/include/net/route.h
index d1bd90b..ee81307 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -28,6 +28,8 @@
 #include <net/inetpeer.h>
 #include <net/flow.h>
 #include <net/inet_sock.h>
+#include <net/ip_fib.h>
+#include <net/l3mdev.h>
 #include <linux/in_route.h>
 #include <linux/rtnetlink.h>
 #include <linux/rcupdate.h>
@@ -112,7 +114,15 @@
 int ip_rt_init(void);
 void rt_cache_flush(struct net *net);
 void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp,
+					  int mp_hash);
+
+static inline struct rtable *__ip_route_output_key(struct net *net,
+						   struct flowi4 *flp)
+{
+	return __ip_route_output_key_hash(net, flp, -1);
+}
+
 struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
 				    const struct sock *sk);
 struct dst_entry *ipv4_blackhole_route(struct net *net,
@@ -256,9 +266,6 @@
 	if (inet_sk(sk)->transparent)
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
-	if (netif_index_is_vrf(sock_net(sk), oif))
-		flow_flags |= FLOWI_FLAG_VRFSRC | FLOWI_FLAG_SKIP_NH_OIF;
-
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
 			   protocol, flow_flags, dst, src, dport, sport);
 }
@@ -275,6 +282,10 @@
 	ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
 			      sport, dport, sk);
 
+	if (!src && oif) {
+		l3mdev_get_saddr(net, oif, fl4);
+		src = fl4->saddr;
+	}
 	if (!dst || !src) {
 		rt = __ip_route_output_key(net, fl4);
 		if (IS_ERR(rt))
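
The route.h hunk above shows the intended pattern: with no source address but
a known output interface, the L3 master device (if any) may fill in
fl4->saddr before the route lookup. Restated as a standalone hedged sketch
(example_prepare_flow() is a placeholder):

	/* Illustrative pre-lookup hook mirroring the hunk above. */
	static void example_prepare_flow(struct net *net, struct flowi4 *fl4)
	{
		if (!fl4->saddr && fl4->flowi4_oif)
			l3mdev_get_saddr(net, fl4->flowi4_oif, fl4);
	}
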
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index aff6ceb..2f87c1b 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -124,7 +124,8 @@
 	int			(*fill_link_af)(struct sk_buff *skb,
 						const struct net_device *dev,
 						u32 ext_filter_mask);
-	size_t			(*get_link_af_size)(const struct net_device *dev);
+	size_t			(*get_link_af_size)(const struct net_device *dev,
+						    u32 ext_filter_mask);
 
 	int			(*validate_link_af)(const struct net_device *dev,
 						    const struct nlattr *attr);
diff --git a/include/net/sock.h b/include/net/sock.h
index 94dff7f..aeed5c9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -150,6 +150,10 @@
  *	@skc_node: main hash linkage for various protocol lookup tables
  *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
  *	@skc_tx_queue_mapping: tx queue number for this connection
+ *	@skc_flags: place holder for sk_flags
+ *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+ *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+ *	@skc_incoming_cpu: record/match cpu processing incoming packets
  *	@skc_refcnt: reference count
  *
  *	This is the minimal network layer representation of sockets, the header
@@ -200,6 +204,16 @@
 
 	atomic64_t		skc_cookie;
 
+	/* following fields are padding to force
+	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
+	 * for different kinds of 'sockets'
+	 * for different kind of 'sockets'
+	 */
+	union {
+		unsigned long	skc_flags;
+		struct sock	*skc_listener; /* request_sock */
+		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
+	};
 	/*
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
@@ -212,9 +226,20 @@
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
+	union {
+		int		skc_incoming_cpu;
+		u32		skc_rcv_wnd;
+		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
+	};
+
 	atomic_t		skc_refcnt;
 	/* private: */
 	int                     skc_dontcopy_end[0];
+	union {
+		u32		skc_rxhash;
+		u32		skc_window_clamp;
+		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
+	};
 	/* public: */
 };
 
@@ -243,8 +268,6 @@
   *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
   *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *	@sk_sndbuf: size of send buffer in bytes
-  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
-  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
   *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
   *	@sk_no_check_rx: allow zero checksum in RX packets
   *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -273,8 +296,6 @@
   *	@sk_rcvlowat: %SO_RCVLOWAT setting
   *	@sk_rcvtimeo: %SO_RCVTIMEO setting
   *	@sk_sndtimeo: %SO_SNDTIMEO setting
-  *	@sk_rxhash: flow hash received from netif layer
-  *	@sk_incoming_cpu: record cpu processing incoming packets
   *	@sk_txhash: computed flow hash for use on transmit
   *	@sk_filter: socket filtering instructions
   *	@sk_timer: sock cleanup timer
@@ -331,6 +352,9 @@
 #define sk_v6_daddr		__sk_common.skc_v6_daddr
 #define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
 #define sk_cookie		__sk_common.skc_cookie
+#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
+#define sk_flags		__sk_common.skc_flags
+#define sk_rxhash		__sk_common.skc_rxhash
 
 	socket_lock_t		sk_lock;
 	struct sk_buff_head	sk_receive_queue;
@@ -350,14 +374,6 @@
 	} sk_backlog;
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 	int			sk_forward_alloc;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
-	u16			sk_incoming_cpu;
-	/* 16bit hole
-	 * Warned : sk_incoming_cpu can be set from softirq,
-	 * Do not use this hole without fully understanding possible issues.
-	 */
 
 	__u32			sk_txhash;
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -373,7 +389,6 @@
 #ifdef CONFIG_XFRM
 	struct xfrm_policy	*sk_policy[2];
 #endif
-	unsigned long 		sk_flags;
 	struct dst_entry	*sk_rx_dst;
 	struct dst_entry __rcu	*sk_dst_cache;
 	spinlock_t		sk_dst_lock;
@@ -759,7 +774,7 @@
 
 #endif
 
-static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
 {
 	return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
 }
@@ -828,6 +843,14 @@
 	if (sk_rcvqueues_full(sk, limit))
 		return -ENOBUFS;
 
+	/*
+	 * If the skb was allocated from pfmemalloc reserves, only
+	 * allow SOCK_MEMALLOC sockets to use it as this socket is
+	 * helping free memory
+	 */
+	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+		return -ENOMEM;
+
 	__sk_add_backlog(sk, skb);
 	sk->sk_backlog.len += skb->truesize;
 	return 0;
@@ -1514,6 +1537,13 @@
 void sock_kzfree_s(struct sock *sk, void *mem, int size);
 void sk_send_sigurg(struct sock *sk);
 
+struct sockcm_cookie {
+	u32 mark;
+};
+
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+		   struct sockcm_cookie *sockc);
+
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
@@ -2201,6 +2231,14 @@
 	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
 }
 
+/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV one.
+ * SYNACK messages can be attached to either (depending on SYNCOOKIE)
+ */
+static inline bool sk_listener(const struct sock *sk)
+{
+	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
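
For sock_cmsg_send(), a hedged sketch of the intended sendmsg-path usage
(example_sendmsg() is hypothetical): generic control messages are harvested
into a sockcm_cookie (currently just the mark) before the packet is built:

	/* Illustrative sendmsg fragment: defaults first, cmsg overrides. */
	static int example_sendmsg(struct sock *sk, struct msghdr *msg)
	{
		struct sockcm_cookie sockc = { .mark = sk->sk_mark };

		if (msg->msg_controllen) {
			int err = sock_cmsg_send(sk, msg, &sockc);

			if (err)
				return err;
		}
		/* ... use sockc.mark when labelling the outgoing skb ... */
		return 0;
	}
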
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 1820787..bc865e2 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -14,8 +14,11 @@
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
 #include <linux/list.h>
+#include <net/ip_fib.h>
 
 #define SWITCHDEV_F_NO_RECURSE		BIT(0)
+#define SWITCHDEV_F_SKIP_EOPNOTSUPP	BIT(1)
+#define SWITCHDEV_F_DEFER		BIT(2)
 
 struct switchdev_trans_item {
 	struct list_head list;
@@ -39,10 +42,11 @@
 }
 
 enum switchdev_attr_id {
-	SWITCHDEV_ATTR_UNDEFINED,
-	SWITCHDEV_ATTR_PORT_PARENT_ID,
-	SWITCHDEV_ATTR_PORT_STP_STATE,
-	SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+	SWITCHDEV_ATTR_ID_UNDEFINED,
+	SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
+	SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+	SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+	SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
 };
 
 struct switchdev_attr {
@@ -52,49 +56,66 @@
 		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
 		u8 stp_state;				/* PORT_STP_STATE */
 		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
+		u32 ageing_time;			/* BRIDGE_AGEING_TIME */
 	} u;
 };
 
-struct fib_info;
-
 enum switchdev_obj_id {
-	SWITCHDEV_OBJ_UNDEFINED,
-	SWITCHDEV_OBJ_PORT_VLAN,
-	SWITCHDEV_OBJ_IPV4_FIB,
-	SWITCHDEV_OBJ_PORT_FDB,
+	SWITCHDEV_OBJ_ID_UNDEFINED,
+	SWITCHDEV_OBJ_ID_PORT_VLAN,
+	SWITCHDEV_OBJ_ID_IPV4_FIB,
+	SWITCHDEV_OBJ_ID_PORT_FDB,
 };
 
 struct switchdev_obj {
 	enum switchdev_obj_id id;
-	int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
-	union {
-		struct switchdev_obj_vlan {		/* PORT_VLAN */
-			u16 flags;
-			u16 vid_begin;
-			u16 vid_end;
-		} vlan;
-		struct switchdev_obj_ipv4_fib {		/* IPV4_FIB */
-			u32 dst;
-			int dst_len;
-			struct fib_info *fi;
-			u8 tos;
-			u8 type;
-			u32 nlflags;
-			u32 tb_id;
-		} ipv4_fib;
-		struct switchdev_obj_fdb {		/* PORT_FDB */
-			const unsigned char *addr;
-			u16 vid;
-			u16 ndm_state;
-		} fdb;
-	} u;
+	u32 flags;
 };
 
+/* SWITCHDEV_OBJ_ID_PORT_VLAN */
+struct switchdev_obj_port_vlan {
+	struct switchdev_obj obj;
+	u16 flags;
+	u16 vid_begin;
+	u16 vid_end;
+};
+
+#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
+	container_of(obj, struct switchdev_obj_port_vlan, obj)
+
+/* SWITCHDEV_OBJ_ID_IPV4_FIB */
+struct switchdev_obj_ipv4_fib {
+	struct switchdev_obj obj;
+	u32 dst;
+	int dst_len;
+	struct fib_info fi;
+	u8 tos;
+	u8 type;
+	u32 nlflags;
+	u32 tb_id;
+};
+
+#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
+	container_of(obj, struct switchdev_obj_ipv4_fib, obj)
+
+/* SWITCHDEV_OBJ_ID_PORT_FDB */
+struct switchdev_obj_port_fdb {
+	struct switchdev_obj obj;
+	unsigned char addr[ETH_ALEN];
+	u16 vid;
+	u16 ndm_state;
+};
+
+#define SWITCHDEV_OBJ_PORT_FDB(obj) \
+	container_of(obj, struct switchdev_obj_port_fdb, obj)
+
 void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
 				  void *data, void (*destructor)(void const *),
 				  struct switchdev_trans_item *tritem);
 void *switchdev_trans_item_dequeue(struct switchdev_trans *trans);
 
+typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
+
 /**
  * struct switchdev_ops - switchdev operations
  *
@@ -102,25 +123,26 @@
  *
  * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
  *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
  *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
  *
- * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*).
  */
 struct switchdev_ops {
 	int	(*switchdev_port_attr_get)(struct net_device *dev,
 					   struct switchdev_attr *attr);
 	int	(*switchdev_port_attr_set)(struct net_device *dev,
-					   struct switchdev_attr *attr,
+					   const struct switchdev_attr *attr,
 					   struct switchdev_trans *trans);
 	int	(*switchdev_port_obj_add)(struct net_device *dev,
-					  struct switchdev_obj *obj,
+					  const struct switchdev_obj *obj,
 					  struct switchdev_trans *trans);
 	int	(*switchdev_port_obj_del)(struct net_device *dev,
-					  struct switchdev_obj *obj);
+					  const struct switchdev_obj *obj);
 	int	(*switchdev_port_obj_dump)(struct net_device *dev,
-					  struct switchdev_obj *obj);
+					   struct switchdev_obj *obj,
+					   switchdev_obj_dump_cb_t *cb);
 };
 
 enum switchdev_notifier_type {
@@ -146,13 +168,17 @@
 
 #ifdef CONFIG_NET_SWITCHDEV
 
+void switchdev_deferred_process(void);
 int switchdev_port_attr_get(struct net_device *dev,
 			    struct switchdev_attr *attr);
 int switchdev_port_attr_set(struct net_device *dev,
-			    struct switchdev_attr *attr);
-int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
-int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
-int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+			    const struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev,
+			   const struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev,
+			   const struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+			    switchdev_obj_dump_cb_t *cb);
 int register_switchdev_notifier(struct notifier_block *nb);
 int unregister_switchdev_notifier(struct notifier_block *nb);
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
@@ -184,6 +210,10 @@
 
 #else
 
+static inline void switchdev_deferred_process(void)
+{
+}
+
 static inline int switchdev_port_attr_get(struct net_device *dev,
 					  struct switchdev_attr *attr)
 {
@@ -191,25 +221,26 @@
 }
 
 static inline int switchdev_port_attr_set(struct net_device *dev,
-					  struct switchdev_attr *attr)
+					  const struct switchdev_attr *attr)
 {
 	return -EOPNOTSUPP;
 }
 
 static inline int switchdev_port_obj_add(struct net_device *dev,
-					 struct switchdev_obj *obj)
+					 const struct switchdev_obj *obj)
 {
 	return -EOPNOTSUPP;
 }
 
 static inline int switchdev_port_obj_del(struct net_device *dev,
-					 struct switchdev_obj *obj)
+					 const struct switchdev_obj *obj)
 {
 	return -EOPNOTSUPP;
 }
 
 static inline int switchdev_port_obj_dump(struct net_device *dev,
-					  struct switchdev_obj *obj)
+					  const struct switchdev_obj *obj,
+					  switchdev_obj_dump_cb_t *cb)
 {
 	return -EOPNOTSUPP;
 }
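
The container_of() accessors above replace the old anonymous union: callers now embed struct switchdev_obj as the first member of the type-specific object and drivers downcast from the generic pointer. A minimal sketch of the resulting driver pattern, assuming a hypothetical foo driver (foo_port_obj_add() and foo_vlan_add() are illustrative names, not part of this patch):

	static int foo_port_obj_add(struct net_device *dev,
				    const struct switchdev_obj *obj,
				    struct switchdev_trans *trans)
	{
		switch (obj->id) {
		case SWITCHDEV_OBJ_ID_PORT_VLAN: {
			/* Recover the full object from the embedded header. */
			const struct switchdev_obj_port_vlan *vlan =
				SWITCHDEV_OBJ_PORT_VLAN(obj);

			return foo_vlan_add(dev, vlan->vid_begin,
					    vlan->vid_end, vlan->flags, trans);
		}
		default:
			return -EOPNOTSUPP;
		}
	}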
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cdbf63d..f80e74c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -279,6 +279,7 @@
 extern int sysctl_tcp_challenge_ack_limit;
 extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
+extern int sysctl_tcp_min_rtt_wlen;
 extern int sysctl_tcp_autocorking;
 extern int sysctl_tcp_invalid_ratelimit;
 extern int sysctl_tcp_pacing_ss_ratio;
@@ -365,8 +366,7 @@
 void tcp_write_timer_handler(struct sock *sk);
 void tcp_delack_timer_handler(struct sock *sk);
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct tcphdr *th, unsigned int len);
 void tcp_rcv_space_adjust(struct sock *sk);
@@ -451,19 +451,22 @@
 void tcp_v4_mtu_reduced(struct sock *sk);
 void tcp_req_err(struct sock *sk, u32 seq);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-struct sock *tcp_create_openreq_child(struct sock *sk,
+struct sock *tcp_create_openreq_child(const struct sock *sk,
 				      struct request_sock *req,
 				      struct sk_buff *skb);
 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
-				  struct dst_entry *dst);
+				  struct dst_entry *dst,
+				  struct request_sock *req_unhash,
+				  bool *own_req);
 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int tcp_connect(struct sock *sk);
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct tcp_fastopen_cookie *foc);
+				struct tcp_fastopen_cookie *foc,
+				bool attach_req);
 int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -492,8 +495,9 @@
 
 /* syncookies: remember time of last synqueue overflow
  * But do not dirty this field too often (once per second is enough)
+ * It is racy as we do not hold a lock, but the race is very minor.
  */
-static inline void tcp_synq_overflow(struct sock *sk)
+static inline void tcp_synq_overflow(const struct sock *sk)
 {
 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 	unsigned long now = jiffies;
@@ -520,8 +524,7 @@
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
 __u32 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -534,8 +537,7 @@
 
 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 			      const struct tcphdr *th, u16 *mssp);
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mss);
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
 #endif
 /* tcp_output.c */
 
@@ -567,6 +569,7 @@
 void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -672,6 +675,12 @@
 	return dst_metric_locked(dst, RTAX_CC_ALGO);
 }
 
+/* Minimum RTT in usec. ~0 means not available. */
+static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
+{
+	return tp->rtt_min[0].rtt;
+}
+
 /* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
  * than the offered window.
@@ -1620,7 +1629,6 @@
 /* /proc */
 enum tcp_seq_states {
 	TCP_SEQ_STATE_LISTENING,
-	TCP_SEQ_STATE_OPENREQ,
 	TCP_SEQ_STATE_ESTABLISHED,
 };
 
@@ -1639,7 +1647,6 @@
 	enum tcp_seq_states	state;
 	struct sock		*syn_wait_sk;
 	int			bucket, offset, sbucket, num;
-	kuid_t			uid;
 	loff_t			last_pos;
 };
 
@@ -1710,30 +1717,31 @@
 			 const struct sock *sk_listener,
 			 struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
 				 __u16 *mss);
 #endif
-	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
 				       const struct request_sock *req,
 				       bool *strict);
 	__u32 (*init_seq)(const struct sk_buff *skb);
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
 			   struct flowi *fl, struct request_sock *req,
-			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
-	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
-			       const unsigned long timeout);
+			   struct tcp_fastopen_cookie *foc,
+			   bool attach_req);
 };
 
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
-	return ops->cookie_init_seq(sk, skb, mss);
+	tcp_synq_overflow(sk);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+	return ops->cookie_init_seq(skb, mss);
 }
 #else
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
-					 struct sock *sk, struct sk_buff *skb,
+					 const struct sock *sk, struct sk_buff *skb,
 					 __u16 *mss)
 {
 	return 0;
@@ -1745,6 +1753,19 @@
 void tcp_v4_init(void);
 void tcp_init(void);
 
+/* tcp_recovery.c */
+
+/* Flags to enable various loss recovery features. See below */
+extern int sysctl_tcp_recovery;
+
+/* Use TCP RACK to detect (some) tail and retransmit losses */
+#define TCP_RACK_LOST_RETRANS  0x1
+
+extern int tcp_rack_mark_lost(struct sock *sk);
+
+extern void tcp_rack_advance(struct tcp_sock *tp,
+			     const struct skb_mstamp *xmit_time, u8 sacked);
+
 /*
  * Save and compile IPv4 options, return a pointer to it
  */
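
tcp_min_rtt() exposes the new windowed min-RTT estimator, and sysctl_tcp_recovery gates RACK per feature bit. A hedged sketch of how the two are meant to be consulted together (the real call sites live in tcp_input.c and the new tcp_recovery.c, which are not part of this header hunk):

	struct tcp_sock *tp = tcp_sk(sk);

	/* Only run RACK when the feature bit is set and at least one
	 * RTT sample exists (~0 is the "not available" sentinel).
	 */
	if ((sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS) &&
	    tcp_min_rtt(tp) != ~0U)
		tcp_rack_mark_lost(sk);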
diff --git a/include/net/tso.h b/include/net/tso.h
index 47e5444..b7be852 100644
--- a/include/net/tso.h
+++ b/include/net/tso.h
@@ -8,6 +8,7 @@
 	void *data;
 	size_t size;
 	u16 ip_id;
+	bool ipv6;
 	u32 tcp_seq;
 };
 
diff --git a/include/net/vrf.h b/include/net/vrf.h
deleted file mode 100644
index 593e609..0000000
--- a/include/net/vrf.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * include/net/net_vrf.h - adds vrf dev structure definitions
- * Copyright (c) 2015 Cumulus Networks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_NET_VRF_H
-#define __LINUX_NET_VRF_H
-
-struct net_vrf_dev {
-	struct rcu_head		rcu;
-	int                     ifindex; /* ifindex of master dev */
-	u32                     tb_id;   /* table id for VRF */
-};
-
-struct slave {
-	struct list_head	list;
-	struct net_device	*dev;
-};
-
-struct slave_queue {
-	struct list_head	all_slaves;
-};
-
-struct net_vrf {
-	struct slave_queue	queue;
-	struct rtable           *rth;
-	u32			tb_id;
-};
-
-
-#if IS_ENABLED(CONFIG_NET_VRF)
-/* called with rcu_read_lock() */
-static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
-{
-	struct net_vrf_dev *vrf_ptr;
-	int ifindex = 0;
-
-	if (!dev)
-		return 0;
-
-	if (netif_is_vrf(dev)) {
-		ifindex = dev->ifindex;
-	} else {
-		vrf_ptr = rcu_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			ifindex = vrf_ptr->ifindex;
-	}
-
-	return ifindex;
-}
-
-static inline int vrf_master_ifindex(const struct net_device *dev)
-{
-	int ifindex;
-
-	rcu_read_lock();
-	ifindex = vrf_master_ifindex_rcu(dev);
-	rcu_read_unlock();
-
-	return ifindex;
-}
-
-/* called with rcu_read_lock */
-static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
-{
-	u32 tb_id = 0;
-
-	if (dev) {
-		struct net_vrf_dev *vrf_ptr;
-
-		vrf_ptr = rcu_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			tb_id = vrf_ptr->tb_id;
-	}
-	return tb_id;
-}
-
-static inline u32 vrf_dev_table(const struct net_device *dev)
-{
-	u32 tb_id;
-
-	rcu_read_lock();
-	tb_id = vrf_dev_table_rcu(dev);
-	rcu_read_unlock();
-
-	return tb_id;
-}
-
-static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
-{
-	struct net_device *dev;
-	u32 tb_id = 0;
-
-	if (!ifindex)
-		return 0;
-
-	rcu_read_lock();
-
-	dev = dev_get_by_index_rcu(net, ifindex);
-	if (dev)
-		tb_id = vrf_dev_table_rcu(dev);
-
-	rcu_read_unlock();
-
-	return tb_id;
-}
-
-/* called with rtnl */
-static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
-{
-	u32 tb_id = 0;
-
-	if (dev) {
-		struct net_vrf_dev *vrf_ptr;
-
-		vrf_ptr = rtnl_dereference(dev->vrf_ptr);
-		if (vrf_ptr)
-			tb_id = vrf_ptr->tb_id;
-	}
-	return tb_id;
-}
-
-/* caller has already checked netif_is_vrf(dev) */
-static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
-{
-	struct rtable *rth = ERR_PTR(-ENETUNREACH);
-	struct net_vrf *vrf = netdev_priv(dev);
-
-	if (vrf) {
-		rth = vrf->rth;
-		atomic_inc(&rth->dst.__refcnt);
-	}
-	return rth;
-}
-
-#else
-static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline int vrf_master_ifindex(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
-{
-	return 0;
-}
-
-static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
-{
-	return 0;
-}
-
-static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
-{
-	return ERR_PTR(-ENETUNREACH);
-}
-#endif
-
-#endif /* __LINUX_NET_VRF_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fd17610..4a9c21f 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -333,7 +333,7 @@
 						const xfrm_address_t *saddr);
 	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
 	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
-	int			(*output)(struct sock *sk, struct sk_buff *skb);
+	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
 	int			(*output_finish)(struct sock *sk, struct sk_buff *skb);
 	int			(*extract_input)(struct xfrm_state *x,
 						 struct sk_buff *skb);
@@ -1527,7 +1527,7 @@
 
 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm4_output(struct sock *sk, struct sk_buff *skb);
+int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
@@ -1552,7 +1552,7 @@
 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_output(struct sock *sk, struct sk_buff *skb);
+int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 			  u8 **prevhdr);
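
The added struct net * parameter continues the tree-wide effort to pass net explicitly through the output path instead of re-deriving it at every hop. A hedged sketch of what an adapted callback looks like (foo_output() is an illustrative name, not from this patch):

	static int foo_output(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
	{
		/* 'net' now arrives as an argument rather than via
		 * sock_net(sk) or dev_net(skb_dst(skb)->dev).
		 */
		return xfrm_output(sk, skb);
	}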
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index ac9bf1c..5f48754 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -730,6 +730,7 @@
 #define DF_EMULATED_VPD_UNIT_SERIAL		0x00000004
 #define DF_USING_UDEV_PATH			0x00000008
 #define DF_USING_ALIAS				0x00000010
+#define DF_READ_ONLY				0x00000020
 	/* Physical device queue depth */
 	u32			queue_depth;
 	/* Used for SPC-2 reservations enforce of ISIDs */
diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h
index 9df61f1..3094618 100644
--- a/include/uapi/asm-generic/signal.h
+++ b/include/uapi/asm-generic/signal.h
@@ -80,8 +80,10 @@
  *	SA_RESTORER	0x04000000
  */
 
+#if !defined MINSIGSTKSZ || !defined SIGSTKSZ
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
+#endif
 
 #ifndef __ASSEMBLY__
 typedef struct {
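
The new guard lets an architecture supply larger signal-stack minimums from its own uapi header before the generic one is included. Schematically, an arch override would look like this (file path and values are hypothetical):

	/* arch/foo/include/uapi/asm/signal.h */
	#define MINSIGSTKSZ	5120
	#define SIGSTKSZ	16384

	#include <asm-generic/signal.h>	/* generic defaults now skipped */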
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index f7b2db4..70d8923 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -263,6 +263,7 @@
 header-y += mman.h
 header-y += mmtimer.h
 header-y += mpls.h
+header-y += mpls_iptunnel.h
 header-y += mqueue.h
 header-y += mroute6.h
 header-y += mroute.h
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h
index 10f0fa2..9c9c6ad 100644
--- a/include/uapi/linux/atm_zatm.h
+++ b/include/uapi/linux/atm_zatm.h
@@ -35,12 +35,6 @@
 	struct zatm_pool_info info;	/* actual information */
 };
 
-struct zatm_t_hist {
-	struct timeval real;		/* real (wall-clock) time */
-	struct timeval expected;	/* expected real time */
-};
-
-
 #define ZATM_OAM_POOL		0	/* free buffer pool for OAM cells */
 #define ZATM_AAL0_POOL		1	/* free buffer pool for AAL0 cells */
 #define ZATM_AAL5_POOL_BASE	2	/* first AAL5 free buffer pool */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4ec0b54..2e03242 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -280,6 +280,24 @@
 	 * Return: TC_ACT_REDIRECT
 	 */
 	BPF_FUNC_redirect,
+
+	/**
+	 * bpf_get_route_realm(skb) - retrieve a dst's tclassid
+	 * @skb: pointer to skb
+	 * Return: realm if != 0
+	 */
+	BPF_FUNC_get_route_realm,
+
+	/**
+	 * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
+	 * @ctx: struct pt_regs*
+	 * @map: pointer to perf_event_array map
+	 * @index: index of event in the map
+	 * @data: data on stack to be output as raw data
+	 * @size: size of data
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_perf_event_output,
 	__BPF_FUNC_MAX_ID,
 };
 
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 89ddb9d..7a291dc 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -47,6 +47,11 @@
 #include <linux/types.h>
 #include <linux/can.h>
 
+struct bcm_timeval {
+	long tv_sec;
+	long tv_usec;
+};
+
 /**
  * struct bcm_msg_head - head of messages to/from the broadcast manager
  * @opcode:    opcode, see enum below.
@@ -62,7 +67,7 @@
 	__u32 opcode;
 	__u32 flags;
 	__u32 count;
-	struct timeval ival1, ival2;
+	struct bcm_timeval ival1, ival2;
 	canid_t can_id;
 	__u32 nframes;
 	struct can_frame frames[0];
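
struct bcm_timeval freezes ival1/ival2 at two longs, decoupling the broadcast-manager ABI from the kernel's struct timeval. For reference, a minimal user-space TX_SETUP using the new layout might look like this (sketch only; creating and connecting a CAN_BCM socket is assumed):

	struct {
		struct bcm_msg_head head;
		struct can_frame frame;
	} msg = {0};

	msg.head.opcode = TX_SETUP;
	msg.head.flags  = SETTIMER | STARTTIMER;
	msg.head.count  = 0;			/* cycle forever on ival2 */
	msg.head.ival2.tv_sec  = 0;		/* struct bcm_timeval now */
	msg.head.ival2.tv_usec = 100000;	/* 100 ms period */
	msg.head.can_id  = 0x123;
	msg.head.nframes = 1;
	/* ...fill msg.frame, then write(s, &msg, sizeof(msg)) */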
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 3635b77..18db144 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -127,6 +127,7 @@
 #define BRIDGE_VLAN_INFO_UNTAGGED	(1<<2)	/* VLAN egresses untagged */
 #define BRIDGE_VLAN_INFO_RANGE_BEGIN	(1<<3) /* VLAN is start of vlan range */
 #define BRIDGE_VLAN_INFO_RANGE_END	(1<<4) /* VLAN is end of vlan range */
+#define BRIDGE_VLAN_INFO_BRENTRY	(1<<5) /* Global bridge VLAN entry */
 
 struct bridge_vlan_info {
 	__u16 flags;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 3a5f263..5ad5737 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -232,11 +232,47 @@
 	IFLA_BR_PRIORITY,
 	IFLA_BR_VLAN_FILTERING,
 	IFLA_BR_VLAN_PROTOCOL,
+	IFLA_BR_GROUP_FWD_MASK,
+	IFLA_BR_ROOT_ID,
+	IFLA_BR_BRIDGE_ID,
+	IFLA_BR_ROOT_PORT,
+	IFLA_BR_ROOT_PATH_COST,
+	IFLA_BR_TOPOLOGY_CHANGE,
+	IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+	IFLA_BR_HELLO_TIMER,
+	IFLA_BR_TCN_TIMER,
+	IFLA_BR_TOPOLOGY_CHANGE_TIMER,
+	IFLA_BR_GC_TIMER,
+	IFLA_BR_GROUP_ADDR,
+	IFLA_BR_FDB_FLUSH,
+	IFLA_BR_MCAST_ROUTER,
+	IFLA_BR_MCAST_SNOOPING,
+	IFLA_BR_MCAST_QUERY_USE_IFADDR,
+	IFLA_BR_MCAST_QUERIER,
+	IFLA_BR_MCAST_HASH_ELASTICITY,
+	IFLA_BR_MCAST_HASH_MAX,
+	IFLA_BR_MCAST_LAST_MEMBER_CNT,
+	IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+	IFLA_BR_MCAST_LAST_MEMBER_INTVL,
+	IFLA_BR_MCAST_MEMBERSHIP_INTVL,
+	IFLA_BR_MCAST_QUERIER_INTVL,
+	IFLA_BR_MCAST_QUERY_INTVL,
+	IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
+	IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
+	IFLA_BR_NF_CALL_IPTABLES,
+	IFLA_BR_NF_CALL_IP6TABLES,
+	IFLA_BR_NF_CALL_ARPTABLES,
+	IFLA_BR_VLAN_DEFAULT_PVID,
 	__IFLA_BR_MAX,
 };
 
 #define IFLA_BR_MAX	(__IFLA_BR_MAX - 1)
 
+struct ifla_bridge_id {
+	__u8	prio[2];
+	__u8	addr[6]; /* ETH_ALEN */
+};
+
 enum {
 	BRIDGE_MODE_UNSPEC,
 	BRIDGE_MODE_HAIRPIN,
@@ -256,6 +292,19 @@
 	IFLA_BRPORT_PROXYARP,	/* proxy ARP */
 	IFLA_BRPORT_LEARNING_SYNC, /* mac learning sync from device */
 	IFLA_BRPORT_PROXYARP_WIFI, /* proxy ARP for Wi-Fi */
+	IFLA_BRPORT_ROOT_ID,	/* designated root */
+	IFLA_BRPORT_BRIDGE_ID,	/* designated bridge */
+	IFLA_BRPORT_DESIGNATED_PORT,
+	IFLA_BRPORT_DESIGNATED_COST,
+	IFLA_BRPORT_ID,
+	IFLA_BRPORT_NO,
+	IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+	IFLA_BRPORT_CONFIG_PENDING,
+	IFLA_BRPORT_MESSAGE_AGE_TIMER,
+	IFLA_BRPORT_FORWARD_DELAY_TIMER,
+	IFLA_BRPORT_HOLD_TIMER,
+	IFLA_BRPORT_FLUSH,
+	IFLA_BRPORT_MULTICAST_ROUTER,
 	__IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -412,6 +461,7 @@
 	IFLA_GENEVE_TOS,
 	IFLA_GENEVE_PORT,	/* destination port */
 	IFLA_GENEVE_COLLECT_METADATA,
+	IFLA_GENEVE_REMOTE6,
 	__IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
@@ -501,6 +551,7 @@
 				 * on/off switch
 				 */
 	IFLA_VF_STATS,		/* network device statistics */
+	IFLA_VF_TRUST,		/* Trust VF */
 	__IFLA_VF_MAX,
 };
 
@@ -562,6 +613,11 @@
 
 #define IFLA_VF_STATS_MAX (__IFLA_VF_STATS_MAX - 1)
 
+struct ifla_vf_trust {
+	__u32 vf;
+	__u32 setting;
+};
+
 /* VF ports management section
  *
  *	Nested layout of set/get msg is:
diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h
index 90c2c95..fb21f0c 100644
--- a/include/uapi/linux/netfilter/nfnetlink_log.h
+++ b/include/uapi/linux/netfilter/nfnetlink_log.h
@@ -51,6 +51,8 @@
 	NFULA_HWTYPE,			/* hardware type */
 	NFULA_HWHEADER,			/* hardware header */
 	NFULA_HWLEN,			/* hardware header length */
+	NFULA_CT,                       /* nf_conntrack_netlink.h */
+	NFULA_CT_INFO,                  /* enum ip_conntrack_info */
 
 	__NFULA_MAX
 };
@@ -93,5 +95,6 @@
 
 #define NFULNL_CFG_F_SEQ	0x0001
 #define NFULNL_CFG_F_SEQ_GLOBAL	0x0002
+#define NFULNL_CFG_F_CONNTRACK	0x0004
 
 #endif /* _NFNETLINK_LOG_H */
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 6f3fe16..f095155 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -54,6 +54,7 @@
 #define NLM_F_ACK		4	/* Reply with ack, with zero or error code */
 #define NLM_F_ECHO		8	/* Echo this request 		*/
 #define NLM_F_DUMP_INTR		16	/* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED	32	/* Dump was filtered as requested */
 
 /* Modifiers to GET request */
 #define NLM_F_ROOT	0x100	/* specify tree	root	*/
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index dd3f753..399f39f 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -86,6 +86,7 @@
  *	for this event is the application ID (AID).
  * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
  * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
+ * @NFC_CMD_ACTIVATE_TARGET: Request NFC controller to reactivate target.
  * @NFC_CMD_VENDOR: Vendor specific command, to be implemented directly
  *	from the driver in order to support hardware specific operations.
  */
@@ -156,6 +157,7 @@
  * @NFC_ATTR_APDU: Secure element APDU
  * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
  * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
+ * @NFC_ATTR_SE_PARAMS: Parameters data from an evt_transaction
  * @NFC_ATTR_VENDOR_ID: NFC manufacturer unique ID, typically an OUI
  * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command
  * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c0ab6b0..1f0b4cf 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -10,6 +10,7 @@
  * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
  * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
  * Copyright 2008 Colin McCabe <colin@cozybit.com>
+ * Copyright 2015	Intel Deutschland GmbH
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -328,7 +329,15 @@
  *	partial scan results may be available
  *
  * @NL80211_CMD_START_SCHED_SCAN: start a scheduled scan at certain
- *	intervals, as specified by %NL80211_ATTR_SCHED_SCAN_INTERVAL.
+ *	intervals and a certain number of cycles, as specified by
+ *	%NL80211_ATTR_SCHED_SCAN_PLANS. If %NL80211_ATTR_SCHED_SCAN_PLANS is
+ *	not specified and only %NL80211_ATTR_SCHED_SCAN_INTERVAL is specified,
+ *	the scheduled scan will run in an infinite loop with the specified
+ *	interval. These attributes are mutually exclusive,
+ *	i.e. NL80211_ATTR_SCHED_SCAN_INTERVAL must not be passed if
+ *	NL80211_ATTR_SCHED_SCAN_PLANS is defined.
+ *	If for some reason the scheduled scan is aborted by the driver, all
+ *	scan plans are canceled (including scan plans that did not start yet).
  *	Like with normal scans, if SSIDs (%NL80211_ATTR_SCAN_SSIDS)
  *	are passed, they are used in the probe requests.  For
  *	broadcast, a broadcast SSID must be passed (ie. an empty
@@ -1761,6 +1770,19 @@
  * @NL80211_ATTR_REG_INDOOR: flag attribute, if set indicates that the device
  *      is operating in an indoor environment.
  *
+ * @NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS: maximum number of scan plans for
+ *	scheduled scan supported by the device (u32), a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL: maximum interval (in seconds) for
+ *	a scan plan (u32), a wiphy attribute.
+ * @NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS: maximum number of iterations in
+ *	a scan plan (u32), a wiphy attribute.
+ * @NL80211_ATTR_SCHED_SCAN_PLANS: a list of scan plans for scheduled scan.
+ *	Each scan plan defines the number of scan iterations and the interval
+ *	between scans. The last scan plan will always run infinitely,
+ *	thus it must not specify the number of iterations, only the interval
+ *	between scans. The scan plans are executed sequentially.
+ *	Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2130,6 +2152,11 @@
 
 	NL80211_ATTR_REG_INDOOR,
 
+	NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+	NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+	NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+	NL80211_ATTR_SCHED_SCAN_PLANS,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3364,6 +3391,9 @@
  *	(not present if no beacon frame has been received yet)
  * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and
  *	@NL80211_BSS_TSF is known to be from a probe response (flag attribute)
+ * @NL80211_BSS_LAST_SEEN_BOOTTIME: CLOCK_BOOTTIME timestamp when this entry
+ *	was last updated by a received frame. The value is expected to be
+ *	accurate to about 10ms. (u64, nanoseconds)
  * @__NL80211_BSS_AFTER_LAST: internal
  * @NL80211_BSS_MAX: highest BSS attribute
  */
@@ -3383,6 +3413,7 @@
 	NL80211_BSS_CHAN_WIDTH,
 	NL80211_BSS_BEACON_TSF,
 	NL80211_BSS_PRESP_DATA,
+	NL80211_BSS_LAST_SEEN_BOOTTIME,
 
 	/* keep last */
 	__NL80211_BSS_AFTER_LAST,
@@ -4589,4 +4620,28 @@
 	NL80211_TDLS_PEER_WMM = 1<<2,
 };
 
+/**
+ * enum nl80211_sched_scan_plan - scanning plan for scheduled scan
+ * @__NL80211_SCHED_SCAN_PLAN_INVALID: attribute number 0 is reserved
+ * @NL80211_SCHED_SCAN_PLAN_INTERVAL: interval between scan iterations. In
+ *	seconds (u32).
+ * @NL80211_SCHED_SCAN_PLAN_ITERATIONS: number of scan iterations in this
+ *	scan plan (u32). The last scan plan must not specify this attribute
+ *	because it will run infinitely. A value of zero is invalid as it will
+ *	make the scan plan meaningless.
+ * @NL80211_SCHED_SCAN_PLAN_MAX: highest scheduled scan plan attribute number
+ *	currently defined
+ * @__NL80211_SCHED_SCAN_PLAN_AFTER_LAST: internal use
+ */
+enum nl80211_sched_scan_plan {
+	__NL80211_SCHED_SCAN_PLAN_INVALID,
+	NL80211_SCHED_SCAN_PLAN_INTERVAL,
+	NL80211_SCHED_SCAN_PLAN_ITERATIONS,
+
+	/* keep last */
+	__NL80211_SCHED_SCAN_PLAN_AFTER_LAST,
+	NL80211_SCHED_SCAN_PLAN_MAX =
+		__NL80211_SCHED_SCAN_PLAN_AFTER_LAST - 1
+};
+
 #endif /* __LINUX_NL80211_H */
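
Since each plan is itself a nested attribute, a request using the new API nests roughly as follows (schematic; the last plan deliberately omits ITERATIONS because it runs until the scheduled scan is stopped):

	NL80211_ATTR_SCHED_SCAN_PLANS
	    plan 1:
	        NL80211_SCHED_SCAN_PLAN_INTERVAL   = 10	/* seconds */
	        NL80211_SCHED_SCAN_PLAN_ITERATIONS = 3
	    plan 2 (last):
	        NL80211_SCHED_SCAN_PLAN_INTERVAL   = 60	/* seconds */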
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 32e07d8..28ccedd 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -323,10 +323,10 @@
 	OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
 				 * The implementation may restrict
 				 * the accepted length of the array. */
-	OVS_KEY_ATTR_CT_STATE,	/* u8 bitmask of OVS_CS_F_* */
+	OVS_KEY_ATTR_CT_STATE,	/* u32 bitmask of OVS_CS_F_* */
 	OVS_KEY_ATTR_CT_ZONE,	/* u16 connection tracking zone. */
 	OVS_KEY_ATTR_CT_MARK,	/* u32 connection tracking mark */
-	OVS_KEY_ATTR_CT_LABEL,	/* 16-octet connection tracking label */
+	OVS_KEY_ATTR_CT_LABELS,	/* 16-octet connection tracking label */
 
 #ifdef __KERNEL__
 	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
@@ -349,6 +349,8 @@
 	OVS_TUNNEL_KEY_ATTR_TP_SRC,		/* be16 src Transport Port. */
 	OVS_TUNNEL_KEY_ATTR_TP_DST,		/* be16 dst Transport Port. */
 	OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,		/* Nested OVS_VXLAN_EXT_* */
+	OVS_TUNNEL_KEY_ATTR_IPV6_SRC,		/* struct in6_addr src IPv6 address. */
+	OVS_TUNNEL_KEY_ATTR_IPV6_DST,		/* struct in6_addr dst IPv6 address. */
 	__OVS_TUNNEL_KEY_ATTR_MAX
 };
 
@@ -439,9 +441,9 @@
 	__u8	nd_tll[ETH_ALEN];
 };
 
-#define OVS_CT_LABEL_LEN	16
-struct ovs_key_ct_label {
-	__u8	ct_label[OVS_CT_LABEL_LEN];
+#define OVS_CT_LABELS_LEN	16
+struct ovs_key_ct_labels {
+	__u8	ct_labels[OVS_CT_LABELS_LEN];
 };
 
 /* OVS_KEY_ATTR_CT_STATE flags */
@@ -449,9 +451,9 @@
 #define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
 #define OVS_CS_F_RELATED           0x04 /* Related to an established
 					 * connection. */
-#define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
-#define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
-#define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+#define OVS_CS_F_REPLY_DIR         0x08 /* Flow is in the reply direction. */
+#define OVS_CS_F_INVALID           0x10 /* Could not track connection. */
+#define OVS_CS_F_TRACKED           0x20 /* Conntrack has occurred. */
 
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@ -618,22 +620,25 @@
 
 /**
  * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
- * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
+ * table. This allows future packets for the same connection to be identified
+ * as 'established' or 'related'. The flow key for the current packet will
+ * retain the pre-commit connection state.
  * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
  * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
  * mask, the corresponding bit in the value is copied to the connection
  * tracking mark field in the connection.
- * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
  * mask. For each bit set in the mask, the corresponding bit in the value is
  * copied to the connection tracking label field in the connection.
  * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
  */
 enum ovs_ct_attr {
 	OVS_CT_ATTR_UNSPEC,
-	OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+	OVS_CT_ATTR_COMMIT,     /* No argument, commits connection. */
 	OVS_CT_ATTR_ZONE,       /* u16 zone id. */
 	OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
-	OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+	OVS_CT_ATTR_LABELS,     /* labels to associate with this connection. */
 	OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
 				   related connections. */
 	__OVS_CT_ATTR_MAX
@@ -641,14 +646,6 @@
 
 #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
 
-/*
- * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
- * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
- * future packets for the same connection to be identified as 'established'
- * or 'related'.
- */
-#define OVS_CT_F_COMMIT		0x01
-
 /**
  * enum ovs_action_attr - Action types.
  *
@@ -705,7 +702,7 @@
 				       * data immediately followed by a mask.
 				       * The data must be zero for the unmasked
 				       * bits. */
-	OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
+	OVS_ACTION_ATTR_CT,           /* Nested OVS_CT_ATTR_* . */
 
 	__OVS_ACTION_ATTR_MAX,	      /* Nothing past this will be accepted
 				       * from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 2881145..d3c4176 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -110,6 +110,7 @@
 	PERF_COUNT_SW_ALIGNMENT_FAULTS		= 7,
 	PERF_COUNT_SW_EMULATION_FAULTS		= 8,
 	PERF_COUNT_SW_DUMMY			= 9,
+	PERF_COUNT_SW_BPF_OUTPUT		= 10,
 
 	PERF_COUNT_SW_MAX,			/* non-ABI */
 };
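
PERF_COUNT_SW_BPF_OUTPUT is the software event that bpf_perf_event_output() writes to; user space opens one per cpu and stores the fds in a BPF_MAP_TYPE_PERF_EVENT_ARRAY. A hedged sketch of the attr setup (the perf_event_open() and map-update plumbing, elided here, follows the kernel samples):

	struct perf_event_attr attr = {
		.type          = PERF_TYPE_SOFTWARE,
		.config        = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type   = PERF_SAMPLE_RAW,
		.sample_period = 1,	/* emit every record */
		.wakeup_events = 1,
	};
	/* fd = perf_event_open(&attr, -1, cpu, -1, 0);
	 * then store fd at index 'cpu' in the PERF_EVENT_ARRAY map.
	 */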
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index a7a6979..fb81065 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -64,6 +64,8 @@
 #define PTRACE_GETSIGMASK	0x420a
 #define PTRACE_SETSIGMASK	0x420b
 
+#define PTRACE_SECCOMP_GET_FILTER	0x420c
+
 /* Read signals from a shared (process wide) queue */
 #define PTRACE_PEEKSIGINFO_SHARED	(1 << 0)
 
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 4db0b3c..123a5af 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -160,7 +160,7 @@
 
 /* Macros to handle rtattributes */
 
-#define RTA_ALIGNTO	4
+#define RTA_ALIGNTO	4U
 #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
 #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
 			 (rta)->rta_len >= sizeof(struct rtattr) && \
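
Making RTA_ALIGNTO unsigned keeps the whole alignment expression in unsigned arithmetic, avoiding surprising signed/unsigned promotion when the macros are combined with signed lengths. The rounding itself is unchanged; for example:

	RTA_ALIGN(5);	/* (5 + 4U - 1) & ~(4U - 1) == 8 */
	RTA_ALIGN(8);	/* already a multiple of 4  == 8 */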
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index df0e09b..9057d7a 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -11,8 +11,6 @@
 
 #include <linux/types.h>
 
-#include <linux/compiler.h>
-
 #define UFFD_API ((__u64)0xAA)
 /*
  * After implementing the respective features it will become:
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce0839..f184909 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * The domain has asked to perform a 'soft reset'. The expected behavior is to
+ * reset internal Xen state for the domain, returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This allows the domain to start over and set up all Xen-specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
diff --git a/ipc/msg.c b/ipc/msg.c
index 66c4f56..1471db9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,13 +137,6 @@
 		return retval;
 	}
 
-	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
-	}
-
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
 
+	/* ipc_addid() locks msq upon success. */
+	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (id < 0) {
+		ipc_rcu_putref(msq, msg_rcu_free);
+		return id;
+	}
+
 	ipc_unlock_object(&msq->q_perm);
 	rcu_read_unlock();
 
diff --git a/ipc/shm.c b/ipc/shm.c
index 222131e..4178727 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -551,12 +551,6 @@
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
-		goto no_id;
-	}
-
 	shp->shm_cprid = task_tgid_vnr(current);
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
@@ -565,6 +559,13 @@
 	shp->shm_nattch = 0;
 	shp->shm_file = file;
 	shp->shm_creator = current;
+
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (id < 0) {
+		error = id;
+		goto no_id;
+	}
+
 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
 	/*
diff --git a/ipc/util.c b/ipc/util.c
index be42300..0f401d9 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -237,6 +237,10 @@
 	rcu_read_lock();
 	spin_lock(&new->lock);
 
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
+
 	id = idr_alloc(&ids->ipcs_idr, new,
 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
 		       GFP_NOWAIT);
@@ -249,10 +253,6 @@
 
 	ids->in_use++;
 
-	current_euid_egid(&euid, &egid);
-	new->cuid = new->uid = euid;
-	new->gid = new->cgid = egid;
-
 	if (next_id < 0) {
 		new->seq = ids->seq++;
 		if (ids->seq > IPCID_SEQ_MAX)
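
All three ipc hunks are the same fix: ipc_addid() publishes the object (idr_alloc() makes it reachable by other tasks and ipc_addid() returns with it locked), so every field must be valid before that call rather than filled in afterwards. Schematically (helper names are illustrative, not the ipc code itself):

	struct obj *o = alloc_obj();

	init_all_fields(o);		/* fully initialize first...      */
	id = publish(o);		/* ...then make it visible/locked */
	if (id < 0) {
		free_obj(o);
		return id;
	}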
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 29ace10..3f4c99e 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>
+#include <linux/perf_event.h>
 
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
@@ -48,7 +49,7 @@
 	array->map.key_size = attr->key_size;
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
-
+	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 	array->elem_size = elem_size;
 
 	return &array->map;
@@ -291,14 +292,23 @@
 
 	attr = perf_event_attrs(event);
 	if (IS_ERR(attr))
-		return (void *)attr;
+		goto err;
 
-	if (attr->type != PERF_TYPE_RAW &&
-	    attr->type != PERF_TYPE_HARDWARE) {
-		perf_event_release_kernel(event);
-		return ERR_PTR(-EINVAL);
-	}
-	return event;
+	if (attr->inherit)
+		goto err;
+
+	if (attr->type == PERF_TYPE_RAW)
+		return event;
+
+	if (attr->type == PERF_TYPE_HARDWARE)
+		return event;
+
+	if (attr->type == PERF_TYPE_SOFTWARE &&
+	    attr->config == PERF_COUNT_SW_BPF_OUTPUT)
+		return event;
+err:
+	perf_event_release_kernel(event);
+	return ERR_PTR(-EINVAL);
 }
 
 static void perf_event_fd_array_put_ptr(void *ptr)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 67c380c..8086471 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -82,6 +82,8 @@
 	if (fp == NULL)
 		return NULL;
 
+	kmemcheck_annotate_bitfield(fp, meta);
+
 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
 	if (aux == NULL) {
 		vfree(fp);
@@ -110,6 +112,8 @@
 
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 	if (fp != NULL) {
+		kmemcheck_annotate_bitfield(fp, meta);
+
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 		fp->pages = size / PAGE_SIZE;
 
@@ -727,6 +731,32 @@
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* RNG for unprivileged user space, with state separated from prandom_u32(). */
+static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
+
+void bpf_user_rnd_init_once(void)
+{
+	prandom_init_once(&bpf_user_rnd_state);
+}
+
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* Should someone ever have the rather unwise idea to use some
+	 * of the registers passed into this function, then note that
+	 * this function is called from native eBPF and classic-to-eBPF
+	 * transformations. Register assignments from both sides are
+	 * different, f.e. classic always sets fn(ctx, A, X) here.
+	 */
+	struct rnd_state *state;
+	u32 res;
+
+	state = &get_cpu_var(bpf_user_rnd_state);
+	res = prandom_u32_state(state);
+	put_cpu_var(state);
+
+	return res;
+}
+
 /* Weak definitions of helper functions in case we don't have bpf syscall. */
 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 83c209d..28592d7 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -88,6 +88,10 @@
 	htab->elem_size = sizeof(struct htab_elem) +
 			  round_up(htab->map.key_size, 8) +
 			  htab->map.value_size;
+
+	htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) +
+				   htab->elem_size * htab->map.max_entries,
+				   PAGE_SIZE) >> PAGE_SHIFT;
 	return &htab->map;
 
 free_htab:
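
map.pages is what the new memlock accounting in kernel/bpf/syscall.c (below) charges against RLIMIT_MEMLOCK, so both the array and hash maps now precompute it at creation time. A worked example with hypothetical sizes:

	/* key_size = 8, value_size = 16, max_entries = 1024
	 * => n_buckets  = 1024 (rounded up to a power of two)
	 *    elem_size  = sizeof(struct htab_elem) + round_up(8, 8) + 16
	 *    total      = 1024 * sizeof(struct hlist_head)
	 *                 + elem_size * 1024
	 *    map.pages  = round_up(total, PAGE_SIZE) >> PAGE_SHIFT
	 */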
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1447ec0..4504ca6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -93,13 +93,8 @@
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
-	.func		= bpf_get_prandom_u32,
+	.func		= bpf_user_rnd_u32,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 };
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 35bac8e..687dd6c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -18,6 +18,8 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 
+int sysctl_unprivileged_bpf_disabled __read_mostly;
+
 static LIST_HEAD(bpf_map_types);
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
@@ -44,11 +46,38 @@
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+static int bpf_map_charge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(map->pages, &user->locked_vm);
+
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(map->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	map->user = user;
+	return 0;
+}
+
+static void bpf_map_uncharge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = map->user;
+
+	atomic_long_sub(map->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
+	bpf_map_uncharge_memlock(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
@@ -108,6 +137,10 @@
 
 	atomic_set(&map->refcnt, 1);
 
+	err = bpf_map_charge_memlock(map);
+	if (err)
+		goto free_map;
+
 	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
 
 	if (err < 0)
@@ -402,6 +435,10 @@
 			 */
 			BUG_ON(!prog->aux->ops->get_func_proto);
 
+			if (insn->imm == BPF_FUNC_get_route_realm)
+				prog->dst_needed = 1;
+			if (insn->imm == BPF_FUNC_get_prandom_u32)
+				bpf_user_rnd_init_once();
 			if (insn->imm == BPF_FUNC_tail_call) {
 				/* mark bpf_tail_call as different opcode
 				 * to avoid conditional branch in
@@ -436,11 +473,37 @@
 	kfree(aux->used_maps);
 }
 
+static int bpf_prog_charge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(prog->pages, &user->locked_vm);
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(prog->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	prog->aux->user = user;
+	return 0;
+}
+
+static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = prog->aux->user;
+
+	atomic_long_sub(prog->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 static void __prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
 	free_used_maps(aux);
+	bpf_prog_uncharge_memlock(aux->prog);
 	bpf_prog_free(aux->prog);
 }
 
@@ -457,6 +520,7 @@
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
 		free_used_maps(prog->aux);
+		bpf_prog_uncharge_memlock(prog);
 		bpf_prog_free(prog);
 	}
 }
@@ -540,11 +604,18 @@
 	    attr->kern_version != LINUX_VERSION_CODE)
 		return -EINVAL;
 
+	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/* plain bpf_prog allocation */
 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
 	if (!prog)
 		return -ENOMEM;
 
+	err = bpf_prog_charge_memlock(prog);
+	if (err)
+		goto free_prog_nouncharge;
+
 	prog->len = attr->insn_cnt;
 
 	err = -EFAULT;
@@ -553,10 +624,10 @@
 		goto free_prog;
 
 	prog->orig_prog = NULL;
-	prog->jited = false;
+	prog->jited = 0;
 
 	atomic_set(&prog->aux->refcnt, 1);
-	prog->gpl_compatible = is_gpl;
+	prog->gpl_compatible = is_gpl ? 1 : 0;
 
 	/* find program type: socket_filter vs tracing_filter */
 	err = find_prog_type(type, prog);
@@ -586,6 +657,8 @@
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
+	bpf_prog_uncharge_memlock(prog);
+free_prog_nouncharge:
 	bpf_prog_free(prog);
 	return err;
 }
@@ -595,11 +668,7 @@
 	union bpf_attr attr = {};
 	int err;
 
-	/* the syscall is limited to root temporarily. This restriction will be
-	 * lifted when security audit is clean. Note that eBPF+tracing must have
-	 * this restriction, since it may pass kernel data to user space
-	 */
-	if (!capable(CAP_SYS_ADMIN))
+	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
 		return -EPERM;
 
 	if (!access_ok(VERIFY_READ, uattr, 1))
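
The charge is accounted per user (struct user_struct), so the rlimit caps the sum of all maps and programs a UID holds. From user space the visible effect is bpf() returning -EPERM once the budget is exhausted; the customary remedy, sketched here, is to raise RLIMIT_MEMLOCK before loading:

	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

	/* needs CAP_SYS_RESOURCE (or root) to raise the hard limit */
	setrlimit(RLIMIT_MEMLOCK, &r);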
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b074b23..b56cf51 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -199,6 +199,7 @@
 	struct verifier_state_list **explored_states; /* search pruning optimization */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
+	bool allow_ptr_leaks;
 };
 
 /* verbose verifier prints what it's seeing
@@ -244,6 +245,7 @@
 } func_limit[] = {
 	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
 	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
 };
 
 static void print_verifier_state(struct verifier_env *env)
@@ -538,6 +540,21 @@
 		return -EINVAL;
 }
 
+static bool is_spillable_regtype(enum bpf_reg_type type)
+{
+	switch (type) {
+	case PTR_TO_MAP_VALUE:
+	case PTR_TO_MAP_VALUE_OR_NULL:
+	case PTR_TO_STACK:
+	case PTR_TO_CTX:
+	case FRAME_PTR:
+	case CONST_PTR_TO_MAP:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /* check_stack_read/write functions track spill/fill of registers,
  * stack boundary and alignment are checked in check_mem_access()
  */
@@ -550,9 +567,7 @@
 	 */
 
 	if (value_regno >= 0 &&
-	    (state->regs[value_regno].type == PTR_TO_MAP_VALUE ||
-	     state->regs[value_regno].type == PTR_TO_STACK ||
-	     state->regs[value_regno].type == PTR_TO_CTX)) {
+	    is_spillable_regtype(state->regs[value_regno].type)) {
 
 		/* register containing pointer is being spilled into stack */
 		if (size != BPF_REG_SIZE) {
@@ -643,6 +658,20 @@
 	return -EACCES;
 }
 
+static bool is_pointer_value(struct verifier_env *env, int regno)
+{
+	if (env->allow_ptr_leaks)
+		return false;
+
+	switch (env->cur_state.regs[regno].type) {
+	case UNKNOWN_VALUE:
+	case CONST_IMM:
+		return false;
+	default:
+		return true;
+	}
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -669,11 +698,21 @@
 	}
 
 	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
+		if (t == BPF_WRITE && value_regno >= 0 &&
+		    is_pointer_value(env, value_regno)) {
+			verbose("R%d leaks addr into map\n", value_regno);
+			return -EACCES;
+		}
 		err = check_map_access(env, regno, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
 	} else if (state->regs[regno].type == PTR_TO_CTX) {
+		if (t == BPF_WRITE && value_regno >= 0 &&
+		    is_pointer_value(env, value_regno)) {
+			verbose("R%d leaks addr into ctx\n", value_regno);
+			return -EACCES;
+		}
 		err = check_ctx_access(env, off, size, t);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
@@ -684,10 +723,17 @@
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
 		}
-		if (t == BPF_WRITE)
+		if (t == BPF_WRITE) {
+			if (!env->allow_ptr_leaks &&
+			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
+			    size != BPF_REG_SIZE) {
+				verbose("attempt to corrupt spilled pointer on stack\n");
+				return -EACCES;
+			}
 			err = check_stack_write(state, off, size, value_regno);
-		else
+		} else {
 			err = check_stack_read(state, off, size, value_regno);
+		}
 	} else {
 		verbose("R%d invalid mem access '%s'\n",
 			regno, reg_type_str[state->regs[regno].type]);
@@ -775,8 +821,13 @@
 		return -EACCES;
 	}
 
-	if (arg_type == ARG_ANYTHING)
+	if (arg_type == ARG_ANYTHING) {
+		if (is_pointer_value(env, regno)) {
+			verbose("R%d leaks addr into helper function\n", regno);
+			return -EACCES;
+		}
 		return 0;
+	}
 
 	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
 	    arg_type == ARG_PTR_TO_MAP_VALUE) {
@@ -860,7 +911,7 @@
 		 * don't allow any other map type to be passed into
 		 * the special func;
 		 */
-		if (bool_map != bool_func)
+		if (bool_func && bool_map != bool_func)
 			return -EINVAL;
 	}
 
@@ -950,8 +1001,9 @@
 }
 
 /* check validity of 32-bit and 64-bit arithmetic operations */
-static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 {
+	struct reg_state *regs = env->cur_state.regs;
 	u8 opcode = BPF_OP(insn->code);
 	int err;
 
@@ -976,6 +1028,12 @@
 		if (err)
 			return err;
 
+		if (is_pointer_value(env, insn->dst_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->dst_reg);
+			return -EACCES;
+		}
+
 		/* check dest operand */
 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
 		if (err)
@@ -1012,6 +1070,11 @@
 				 */
 				regs[insn->dst_reg] = regs[insn->src_reg];
 			} else {
+				if (is_pointer_value(env, insn->src_reg)) {
+					verbose("R%d partial copy of pointer\n",
+						insn->src_reg);
+					return -EACCES;
+				}
 				regs[insn->dst_reg].type = UNKNOWN_VALUE;
 				regs[insn->dst_reg].map_ptr = NULL;
 			}
@@ -1061,8 +1124,18 @@
 		/* pattern match 'bpf_add Rx, imm' instruction */
 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
 		    regs[insn->dst_reg].type == FRAME_PTR &&
-		    BPF_SRC(insn->code) == BPF_K)
+		    BPF_SRC(insn->code) == BPF_K) {
 			stack_relative = true;
+		} else if (is_pointer_value(env, insn->dst_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->dst_reg);
+			return -EACCES;
+		} else if (BPF_SRC(insn->code) == BPF_X &&
+			   is_pointer_value(env, insn->src_reg)) {
+			verbose("R%d pointer arithmetic prohibited\n",
+				insn->src_reg);
+			return -EACCES;
+		}
 
 		/* check dest operand */
 		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
@@ -1101,6 +1174,12 @@
 		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
 		if (err)
 			return err;
+
+		if (is_pointer_value(env, insn->src_reg)) {
+			verbose("R%d pointer comparison prohibited\n",
+				insn->src_reg);
+			return -EACCES;
+		}
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose("BPF_JMP uses reserved fields\n");
@@ -1155,6 +1234,9 @@
 			regs[insn->dst_reg].type = CONST_IMM;
 			regs[insn->dst_reg].imm = 0;
 		}
+	} else if (is_pointer_value(env, insn->dst_reg)) {
+		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
+		return -EACCES;
 	} else if (BPF_SRC(insn->code) == BPF_K &&
 		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {
 
@@ -1658,7 +1740,7 @@
 		}
 
 		if (class == BPF_ALU || class == BPF_ALU64) {
-			err = check_alu_op(regs, insn);
+			err = check_alu_op(env, insn);
 			if (err)
 				return err;
 
@@ -1816,6 +1898,11 @@
 				if (err)
 					return err;
 
+				if (is_pointer_value(env, BPF_REG_0)) {
+					verbose("R0 leaks addr as return value\n");
+					return -EACCES;
+				}
+
 process_bpf_exit:
 				insn_idx = pop_stack(env, &prev_insn_idx);
 				if (insn_idx < 0) {
@@ -2024,7 +2111,7 @@
 
 		cnt = env->prog->aux->ops->
 			convert_ctx_access(type, insn->dst_reg, insn->src_reg,
-					   insn->off, insn_buf);
+					   insn->off, insn_buf, env->prog);
 		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 			verbose("bpf verifier is misconfigured\n");
 			return -EINVAL;
@@ -2144,6 +2231,8 @@
 	if (ret < 0)
 		goto skip_full_check;
 
+	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
 	ret = do_check(env);
 
 skip_full_check:
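
With allow_ptr_leaks clear for unprivileged loaders, the verifier now refuses any instruction that could expose a kernel address. For example, a program that stores a stack pointer into a map value is rejected at the store (instruction macros as in linux/filter.h; the map-lookup preamble that leaves a PTR_TO_MAP_VALUE in R0 is elided):

	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),	/* r2 = fp     */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),	/* r2 = fp - 8 */
	/* ... map lookup leaving a map value pointer in r0 ... */
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
	BPF_EXIT_INSN(),

which fails with "R2 leaks addr into map" unless the loader has CAP_SYS_ADMIN.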
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f548f69..64754bf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,11 +1243,7 @@
 					      PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
 	int entry = sizeof(u64); /* value */
 	int size = 0;
@@ -1263,7 +1259,7 @@
 		entry += sizeof(u64);
 
 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
-		nr += event->group_leader->nr_siblings;
+		nr += nr_siblings;
 		size += sizeof(u64);
 	}
 
@@ -1271,14 +1267,11 @@
 	event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
 	struct perf_sample_data *data;
-	u64 sample_type = event->attr.sample_type;
 	u16 size = 0;
 
-	perf_event__read_size(event);
-
 	if (sample_type & PERF_SAMPLE_IP)
 		size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@
 	event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+	__perf_event_read_size(event,
+			       event->group_leader->nr_siblings);
+	__perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
 	struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@
 	event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+	/*
+	 * The values computed here will be over-written when we actually
+	 * attach the event.
+	 */
+	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+	perf_event__id_header_size(event);
+
+	/*
+	 * Sum the lot; should not exceed the 64k limit we have on records.
+	 * Conservative limit to allow for callchains and other variable fields.
+	 */
+	if (event->read_size + event->header_size +
+	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+		return false;
+
+	return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
@@ -5261,9 +5286,15 @@
 
 	if (sample_type & PERF_SAMPLE_RAW) {
 		if (data->raw) {
-			perf_output_put(handle, data->raw->size);
-			__output_copy(handle, data->raw->data,
-					   data->raw->size);
+			u32 raw_size = data->raw->size;
+			u32 real_size = round_up(raw_size + sizeof(u32),
+						 sizeof(u64)) - sizeof(u32);
+			u64 zero = 0;
+
+			perf_output_put(handle, real_size);
+			__output_copy(handle, data->raw->data, raw_size);
+			if (real_size - raw_size)
+				__output_copy(handle, &zero, real_size - raw_size);
 		} else {
 			struct {
 				u32	size;
@@ -5395,8 +5426,7 @@
 		else
 			size += sizeof(u32);
 
-		WARN_ON_ONCE(size & (sizeof(u64)-1));
-		header->size += size;
+		header->size += round_up(size, sizeof(u64));
 	}
 
 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -8297,13 +8327,35 @@
 
 	if (move_group) {
 		gctx = group_leader->ctx;
+		mutex_lock_double(&gctx->mutex, &ctx->mutex);
+	} else {
+		mutex_lock(&ctx->mutex);
+	}
 
+	if (!perf_event_validate_size(event)) {
+		err = -E2BIG;
+		goto err_locked;
+	}
+
+	/*
+	 * Must be under the same ctx::mutex as perf_install_in_context(),
+	 * because we need to serialize with concurrent event creation.
+	 */
+	if (!exclusive_event_installable(event, ctx)) {
+		/* exclusive and group stuff are assumed mutually exclusive */
+		WARN_ON_ONCE(move_group);
+
+		err = -EBUSY;
+		goto err_locked;
+	}
+
+	WARN_ON_ONCE(ctx->parent_ctx);
+
+	if (move_group) {
 		/*
 		 * See perf_event_ctx_lock() for comments on the details
 		 * of swizzling perf_event::ctx.
 		 */
-		mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
 		perf_remove_from_context(group_leader, false);
 
 		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8311,13 +8363,7 @@
 			perf_remove_from_context(sibling, false);
 			put_ctx(gctx);
 		}
-	} else {
-		mutex_lock(&ctx->mutex);
-	}
 
-	WARN_ON_ONCE(ctx->parent_ctx);
-
-	if (move_group) {
 		/*
 		 * Wait for everybody to stop referencing the events through
 		 * the old lists, before installing it on new lists.
@@ -8349,22 +8395,29 @@
 		perf_event__state_init(group_leader);
 		perf_install_in_context(ctx, group_leader, group_leader->cpu);
 		get_ctx(ctx);
+
+		/*
+		 * Now that all events are installed in @ctx, nothing
+		 * references @gctx anymore, so drop the last reference we have
+		 * on it.
+		 */
+		put_ctx(gctx);
 	}
 
-	if (!exclusive_event_installable(event, ctx)) {
-		err = -EBUSY;
-		mutex_unlock(&ctx->mutex);
-		fput(event_file);
-		goto err_context;
-	}
+	/*
+	 * Precalculate sample_data sizes; do this while holding ctx::mutex
+	 * such that we're serialized against further additions and before
+	 * perf_install_in_context(), at which point the event becomes active
+	 * and can use these values.
+	 */
+	perf_event__header_size(event);
+	perf_event__id_header_size(event);
 
 	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 
-	if (move_group) {
+	if (move_group)
 		mutex_unlock(&gctx->mutex);
-		put_ctx(gctx);
-	}
 	mutex_unlock(&ctx->mutex);
 
 	put_online_cpus();
@@ -8376,12 +8429,6 @@
 	mutex_unlock(&current->perf_event_mutex);
 
 	/*
-	 * Precalculate sample_data sizes
-	 */
-	perf_event__header_size(event);
-	perf_event__id_header_size(event);
-
-	/*
 	 * Drop the reference on the group_event after placing the
 	 * new event on the sibling_list. This ensures destruction
 	 * of the group leader will find the pointer to itself in
@@ -8391,6 +8438,12 @@
 	fd_install(event_fd, event_file);
 	return event_fd;
 
+err_locked:
+	if (move_group)
+		mutex_unlock(&gctx->mutex);
+	mutex_unlock(&ctx->mutex);
+/* err_file: */
+	fput(event_file);
 err_context:
 	perf_unpin_context(ctx);
 	put_ctx(ctx);
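
The PERF_SAMPLE_RAW hunk above advertises a payload size rounded up so that the u32 size field plus payload land on a u64 boundary, then zero-fills the gap. A minimal user-space sketch of the same arithmetic (round_up() is open-coded here; the values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
static uint32_t round_up_pow2(uint32_t x, uint32_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* A raw record is a u32 size field followed by the payload; the
	 * kernel now advertises a size that makes field + payload a
	 * multiple of u64 and writes zeroes into the difference.
	 */
	uint32_t raw_size = 13;
	uint32_t real_size = round_up_pow2(raw_size + (uint32_t)sizeof(uint32_t),
					   (uint32_t)sizeof(uint64_t)) -
			     (uint32_t)sizeof(uint32_t);

	printf("raw %u -> advertised %u (%u bytes of zero padding)\n",
	       raw_size, real_size, real_size - raw_size);
	return 0;
}
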
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index de41a68..e25a83b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -22,7 +22,6 @@
 
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
- * @irq:       the interrupt number
  * @desc:      description of the interrupt
  *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
@@ -35,6 +34,7 @@
 	kstat_incr_irqs_this_cpu(desc);
 	ack_bad_irq(irq);
 }
+EXPORT_SYMBOL_GPL(handle_bad_irq);
 
 /*
  * Special, empty irq handler:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f9a59f6..4c21386 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1761,6 +1761,7 @@
 	kfree(__free_percpu_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
 }
+EXPORT_SYMBOL_GPL(free_percpu_irq);
 
 /**
  *	setup_percpu_irq - setup a per-cpu interrupt
@@ -1790,9 +1791,10 @@
  *	@devname: An ascii name for the claiming device
  *	@dev_id: A percpu cookie passed back to the handler function
  *
- *	This call allocates interrupt resources, but doesn't
- *	automatically enable the interrupt. It has to be done on each
- *	CPU using enable_percpu_irq().
+ *	This call allocates interrupt resources and enables the
+ *	interrupt on the local CPU. If the interrupt is supposed to be
+ *	enabled on other CPUs, it has to be done on each CPU using
+ *	enable_percpu_irq().
  *
  *	Dev_id must be globally unique. It is a per-cpu variable, and
  *	the handler gets called with the interrupted CPU's instance of
@@ -1831,6 +1833,7 @@
 
 	return retval;
 }
+EXPORT_SYMBOL_GPL(request_percpu_irq);
 
 /**
  *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 7e6512b..be9149f 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -228,11 +228,7 @@
 {
 	struct irq_chip *chip = info->chip;
 
-	BUG_ON(!chip);
-	if (!chip->irq_mask)
-		chip->irq_mask = pci_msi_mask_irq;
-	if (!chip->irq_unmask)
-		chip->irq_unmask = pci_msi_unmask_irq;
+	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
 	if (!chip->irq_set_affinity)
 		chip->irq_set_affinity = msi_domain_set_affinity;
 }
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index e3a8c95..a50ddc9 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -323,18 +324,29 @@
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_MUTEX(register_lock);
 	char name [MAX_NAMELEN];
 
-	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
 		return;
 
+	/*
+	 * irq directories are registered only when a handler is
+	 * added, not when the descriptor is created, so multiple
+	 * tasks might try to register at the same time.
+	 */
+	mutex_lock(&register_lock);
+
+	if (desc->dir)
+		goto out_unlock;
+
 	memset(name, 0, MAX_NAMELEN);
 	sprintf(name, "%d", irq);
 
 	/* create /proc/irq/1234 */
 	desc->dir = proc_mkdir(name, root_irq_dir);
 	if (!desc->dir)
-		return;
+		goto out_unlock;
 
 #ifdef CONFIG_SMP
 	/* create /proc/irq/<irq>/smp_affinity */
@@ -355,6 +367,9 @@
 
 	proc_create_data("spurious", 0444, desc->dir,
 			 &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+	mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
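
The register_irq_proc() change serializes racing registrations with a function-local mutex and repeats the desc->dir test under the lock. A small pthread sketch of that check-under-lock pattern (all names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static bool registered;	/* stands in for desc->dir */

static void register_once(void)
{
	/* The decisive test must be made under the lock; otherwise two
	 * tasks can both see "not registered" and register twice.
	 */
	pthread_mutex_lock(&register_lock);
	if (!registered) {
		registered = true;
		puts("registered");
	}
	pthread_mutex_unlock(&register_lock);
}

int main(void)
{
	register_once();
	register_once();	/* second call is a no-op */
	return 0;
}
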
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8acfbf7..4e49cc4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3068,7 +3068,7 @@
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
 			  struct lockdep_map *nest_lock, unsigned long ip,
-			  int references)
+			  int references, int pin_count)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -3157,7 +3157,7 @@
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = lockstat_clock();
 #endif
-	hlock->pin_count = 0;
+	hlock->pin_count = pin_count;
 
 	if (check && !mark_irqflags(curr, hlock))
 		return 0;
@@ -3343,7 +3343,7 @@
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->nest_lock, hlock->acquire_ip,
-				hlock->references))
+				hlock->references, hlock->pin_count))
 			return 0;
 	}
 
@@ -3433,7 +3433,7 @@
 			hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->nest_lock, hlock->acquire_ip,
-				hlock->references))
+				hlock->references, hlock->pin_count))
 			return 0;
 	}
 
@@ -3583,7 +3583,7 @@
 	current->lockdep_recursion = 1;
 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip, 0);
+		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 787320d..b760bae 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -1016,6 +1016,11 @@
 		break;
 	}
 #endif
+
+	case PTRACE_SECCOMP_GET_FILTER:
+		ret = seccomp_get_filter(child, addr, datavp);
+		break;
+
 	default:
 		break;
 	}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9f75f25..775d36c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3868,6 +3868,7 @@
 static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
+	static struct lock_class_key rcu_exp_sched_rdp_class;
 	unsigned long flags;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rcu_get_root(rsp);
@@ -3883,6 +3884,10 @@
 	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rsp == &rcu_sched_state)
+		lockdep_set_class_and_name(&rdp->exp_funnel_mutex,
+					   &rcu_exp_sched_rdp_class,
+					   "rcu_data_exp_sched");
 }
 
 /*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f9c928..10a8faa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2517,11 +2517,11 @@
 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
 	 * schedule one last time. The schedule call will never return, and
 	 * the scheduled task must drop that reference.
-	 * The test for TASK_DEAD must occur while the runqueue locks are
-	 * still held, otherwise prev could be scheduled on another cpu, die
-	 * there before we look at prev->state, and then the reference would
-	 * be dropped twice.
-	 *		Manfred Spraul <manfred@colorfullife.com>
+	 *
+	 * We must observe prev->state before clearing prev->on_cpu (in
+	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
+	 * running on another CPU and we could race with its RUNNING -> DEAD
+	 * transition, resulting in a double drop.
 	 */
 	prev_state = prev->state;
 	vtime_task_switch(prev);
@@ -4934,7 +4934,15 @@
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+	/*
+	 * It's possible that init_idle() gets called multiple times on a task,
+	 * in that case do_set_cpus_allowed() will not do the right thing.
+	 *
+	 * And since this is boot we can forgo the serialization.
+	 */
+	set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4951,7 +4959,7 @@
 
 	rq->curr = rq->idle = idle;
 	idle->on_rq = TASK_ON_RQ_QUEUED;
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	idle->on_cpu = 1;
 #endif
 	raw_spin_unlock(&rq->lock);
@@ -4966,7 +4974,7 @@
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
 	vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 68cda11..6d2a119 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1078,9 +1078,10 @@
 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
+	 *
+	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
-	smp_wmb();
-	prev->on_cpu = 0;
+	smp_store_release(&prev->on_cpu, 0);
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
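
The smp_store_release() above orders the prev->state read before the clearing of prev->on_cpu; per the new comment, the waker pairs this with a control dependency and rmb in try_to_wake_up(). A simplified user-space C11 analogue of that pairing, using an acquire load on the reader side (names are made up for illustration):

#include <stdatomic.h>
#include <stdio.h>

static int task_state;			/* written before the release */
static atomic_int on_cpu = 1;

static void finish_switch(void)	/* finish_lock_switch() side */
{
	task_state = 42;
	/* release: everything written above is visible to an
	 * acquire load that observes on_cpu == 0
	 */
	atomic_store_explicit(&on_cpu, 0, memory_order_release);
}

static void waker(void)		/* try_to_wake_up() side */
{
	while (atomic_load_explicit(&on_cpu, memory_order_acquire))
		;	/* spin until the switch has finished */
	printf("state %d\n", task_state);	/* guaranteed to see 42 */
}

int main(void)
{
	finish_switch();
	waker();
	return 0;
}
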
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5bd4779..580ac2d 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -347,6 +347,7 @@
 {
 	struct seccomp_filter *sfilter;
 	int ret;
+	const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
 
 	if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
 		return ERR_PTR(-EINVAL);
@@ -370,7 +371,7 @@
 		return ERR_PTR(-ENOMEM);
 
 	ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
-					seccomp_check_filter);
+					seccomp_check_filter, save_orig);
 	if (ret < 0) {
 		kfree(sfilter);
 		return ERR_PTR(ret);
@@ -469,7 +470,7 @@
 static inline void seccomp_filter_free(struct seccomp_filter *filter)
 {
 	if (filter) {
-		bpf_prog_free(filter->prog);
+		bpf_prog_destroy(filter->prog);
 		kfree(filter);
 	}
 }
@@ -867,3 +868,76 @@
 	/* prctl interface doesn't have flags, so they are always zero. */
 	return do_seccomp(op, 0, uargs);
 }
+
+#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
+long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+			void __user *data)
+{
+	struct seccomp_filter *filter;
+	struct sock_fprog_kern *fprog;
+	long ret;
+	unsigned long count = 0;
+
+	if (!capable(CAP_SYS_ADMIN) ||
+	    current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+		return -EACCES;
+	}
+
+	spin_lock_irq(&task->sighand->siglock);
+	if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	filter = task->seccomp.filter;
+	while (filter) {
+		filter = filter->prev;
+		count++;
+	}
+
+	if (filter_off >= count) {
+		ret = -ENOENT;
+		goto out;
+	}
+	count -= filter_off;
+
+	filter = task->seccomp.filter;
+	while (filter && count > 1) {
+		filter = filter->prev;
+		count--;
+	}
+
+	if (WARN_ON(count != 1 || !filter)) {
+		/* The filter tree shouldn't shrink while we're using it. */
+		ret = -ENOENT;
+		goto out;
+	}
+
+	fprog = filter->prog->orig_prog;
+	if (!fprog) {
+		/* This must be a new non-cBPF filter, since we save
+		 * every cBPF filter's orig_prog above when
+		 * CONFIG_CHECKPOINT_RESTORE is enabled.
+		 */
+		ret = -EMEDIUMTYPE;
+		goto out;
+	}
+
+	ret = fprog->len;
+	if (!data)
+		goto out;
+
+	get_seccomp_filter(task);
+	spin_unlock_irq(&task->sighand->siglock);
+
+	if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
+		ret = -EFAULT;
+
+	put_seccomp_filter(task);
+	return ret;
+
+out:
+	spin_unlock_irq(&task->sighand->siglock);
+	return ret;
+}
+#endif
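
seccomp_get_filter() counts the prev-linked chain, then walks back until count - filter_off reaches 1, so offset 0 ends up naming the oldest installed filter. A stand-alone sketch of that indexing with simplified types (the kernel returns -ENOENT where this returns NULL):

#include <stdio.h>

struct filter {
	int id;
	struct filter *prev;	/* next-older filter, or NULL */
};

/* Return the filter at @off, 0 being the oldest, or NULL if out of range. */
static struct filter *filter_at(struct filter *newest, unsigned long off)
{
	unsigned long count = 0;
	struct filter *f;

	for (f = newest; f; f = f->prev)
		count++;
	if (off >= count)
		return NULL;

	/* step towards older filters until count drops to 1 */
	count -= off;
	for (f = newest; count > 1; f = f->prev)
		count--;
	return f;
}

int main(void)
{
	struct filter a = { 1, NULL }, b = { 2, &a }, c = { 3, &b };

	printf("%d %d\n", filter_at(&c, 0)->id, filter_at(&c, 2)->id);
	/* prints "1 3": offset 0 is the oldest filter, 2 the newest */
	return 0;
}
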
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e69201d..96c856b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -64,6 +64,7 @@
 #include <linux/binfmts.h>
 #include <linux/sched/sysctl.h>
 #include <linux/kexec.h>
+#include <linux/bpf.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -1139,6 +1140,18 @@
 		.proc_handler	= timer_migration_handler,
 	},
 #endif
+#ifdef CONFIG_BPF_SYSCALL
+	{
+		.procname	= "unprivileged_bpf_disabled",
+		.data		= &sysctl_unprivileged_bpf_disabled,
+		.maxlen		= sizeof(sysctl_unprivileged_bpf_disabled),
+		.mode		= 0644,
+		/* only handle a transition from default "0" to "1" */
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &one,
+		.extra2		= &one,
+	},
+#endif
 	{ }
 };
 
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 841b72f..3a38775 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -217,7 +217,7 @@
 			continue;
 
 		/* Check the deviation from the watchdog clocksource. */
-		if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+		if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
 			pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
 				cs->name);
 			pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 3739ac6..44d2cc0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1251,7 +1251,7 @@
 	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
 	tk_set_wall_to_mono(tk, tmp);
 
-	timekeeping_update(tk, TK_MIRROR);
+	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
 	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0fe96c7..4228fd3 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -199,6 +199,11 @@
 	if (!event)
 		return -ENOENT;
 
+	/* make sure event is local and doesn't have pmu::count */
+	if (event->oncpu != smp_processor_id() ||
+	    event->pmu->count)
+		return -EINVAL;
+
 	/*
 	 * we don't know if the function is run successfully by the
 	 * return value. It can be judged in other places, such as
@@ -207,14 +212,58 @@
 	return perf_event_read_local(event);
 }
 
-const struct bpf_func_proto bpf_perf_event_read_proto = {
+static const struct bpf_func_proto bpf_perf_event_read_proto = {
 	.func		= bpf_perf_event_read,
-	.gpl_only	= false,
+	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+	struct pt_regs *regs = (struct pt_regs *) (long) r1;
+	struct bpf_map *map = (struct bpf_map *) (long) r2;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	void *data = (void *) (long) r4;
+	struct perf_sample_data sample_data;
+	struct perf_event *event;
+	struct perf_raw_record raw = {
+		.size = size,
+		.data = data,
+	};
+
+	if (unlikely(index >= array->map.max_entries))
+		return -E2BIG;
+
+	event = (struct perf_event *)array->ptrs[index];
+	if (unlikely(!event))
+		return -ENOENT;
+
+	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
+		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
+		return -EINVAL;
+
+	if (unlikely(event->oncpu != smp_processor_id()))
+		return -EOPNOTSUPP;
+
+	perf_sample_data_init(&sample_data, 0, 0);
+	sample_data.raw = &raw;
+	perf_event_output(event, &sample_data, regs);
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto = {
+	.func		= bpf_perf_event_output,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_STACK,
+	.arg5_type	= ARG_CONST_STACK_SIZE,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -242,6 +291,8 @@
 		return &bpf_get_smp_processor_id_proto;
 	case BPF_FUNC_perf_event_read:
 		return &bpf_perf_event_read_proto;
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto;
 	default:
 		return NULL;
 	}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ca71582..bcb14ca 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1458,13 +1458,13 @@
 	timer_stats_timer_set_start_info(&dwork->timer);
 
 	dwork->wq = wq;
+	/* the timer isn't guaranteed to run on this CPU, so record it earlier */
+	if (cpu == WORK_CPU_UNBOUND)
+		cpu = raw_smp_processor_id();
 	dwork->cpu = cpu;
 	timer->expires = jiffies + delay;
 
-	if (unlikely(cpu != WORK_CPU_UNBOUND))
-		add_timer_on(timer, cpu);
-	else
-		add_timer(timer);
+	add_timer_on(timer, cpu);
 }
 
 /**
diff --git a/lib/Kconfig b/lib/Kconfig
index 2e491ac..f0df318 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -220,6 +220,7 @@
 
 config ZLIB_DEFLATE
 	tristate
+	select BITREVERSE
 
 config LZO_COMPRESS
 	tristate
diff --git a/lib/Makefile b/lib/Makefile
index 13a7c6a..8de3b01 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,8 @@
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 once.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 0000000..05c8604
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,62 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+	struct once_work *work;
+
+	work = container_of(w, struct once_work, work);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
+	kfree(work);
+}
+
+static void once_disable_jump(struct static_key *key)
+{
+	struct once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
+	if (*done) {
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
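
The __do_once_start()/__do_once_done() pair is split so a DO_ONCE()-style caller can run its payload exactly once with the lock held across the payload; disabling the static key is deferred to a workqueue. A user-space analogue of the start/done split (the static-key machinery is elided and all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;

static bool do_once_start(bool *done)
{
	pthread_mutex_lock(&once_lock);
	if (*done) {
		pthread_mutex_unlock(&once_lock);
		return false;	/* caller skips the payload */
	}
	return true;		/* lock stays held, as in the kernel */
}

static void do_once_done(bool *done)
{
	*done = true;
	pthread_mutex_unlock(&once_lock);
	/* the kernel additionally queues work to flip the static key */
}

static void init_once(void)
{
	static bool done;

	if (!do_once_start(&done))
		return;
	puts("expensive one-time init");	/* runs exactly once */
	do_once_done(&done);
}

int main(void)
{
	init_once();
	init_once();
	return 0;
}
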
diff --git a/lib/random32.c b/lib/random32.c
index 0bee183..1211191 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@
 	prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@
 	add_timer(&seed_timer);
 }
 
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0],   2U);
+		state->s2 = __seed(seeds[1],   8U);
+		state->s3 = __seed(seeds[2],  16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  *	Generate better values after random number generator
  *	is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
-	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0],   2U);
-		state->s2 = __seed(seeds[1],   8U);
-		state->s3 = __seed(seeds[2],  16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
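
prandom_seed_full_state() passes each taus113 state word through __seed() because the four LFSR components degenerate if seeded below 2, 8, 16 and 128 respectively. A sketch of the clamp, assuming __seed() keeps its usual form of lifting too-small seeds above the minimum:

#include <stdint.h>
#include <stdio.h>

/* Lift a seed below the component's minimum, as lib/random32.c does. */
static uint32_t seed_clamp(uint32_t x, uint32_t min)
{
	return (x < min) ? x + min : x;
}

int main(void)
{
	/* the four taus113 components need seeds >= 2, 8, 16 and 128 */
	printf("%u %u\n", seed_clamp(0, 128), seed_clamp(200, 128));
	/* prints "128 200": tiny seeds are lifted, valid seeds pass */
	return 0;
}
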
diff --git a/lib/string.c b/lib/string.c
index 13d1e84..84775ba 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,10 @@
 #include <linux/bug.h>
 #include <linux/errno.h>
 
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+#include <asm/page.h>
+
 #ifndef __HAVE_ARCH_STRNCASECMP
 /**
  * strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,91 @@
 EXPORT_SYMBOL(strlcpy);
 #endif
 
+#ifndef __HAVE_ARCH_STRSCPY
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer.
+ * The routine returns the number of characters copied (not including
+ * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * The behavior is undefined if the string buffers overlap.
+ * The destination buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the src string beyond the specified "count" bytes, and since
+ * the return value is easier to error-check than strlcpy()'s.
+ * In addition, the implementation is robust to the string changing out
+ * from underneath it, unlike the current strlcpy() implementation.
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zeroed.  If the zeroing is desired, it's likely cleaner to use strscpy()
+ * with an overflow test, then just memset() the tail of the dest buffer.
+ */
+ssize_t strscpy(char *dest, const char *src, size_t count)
+{
+	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+	size_t max = count;
+	long res = 0;
+
+	if (count == 0)
+		return -E2BIG;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/*
+	 * If src is unaligned, don't cross a page boundary,
+	 * since we don't know if the next page is mapped.
+	 */
+	if ((long)src & (sizeof(long) - 1)) {
+		size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
+		if (limit < max)
+			max = limit;
+	}
+#else
+	/* If src or dest is unaligned, don't do word-at-a-time. */
+	if (((long) dest | (long) src) & (sizeof(long) - 1))
+		max = 0;
+#endif
+
+	while (max >= sizeof(unsigned long)) {
+		unsigned long c, data;
+
+		c = *(unsigned long *)(src+res);
+		if (has_zero(c, &data, &constants)) {
+			data = prep_zero_mask(c, data, &constants);
+			data = create_zero_mask(data);
+			*(unsigned long *)(dest+res) = c & zero_bytemask(data);
+			return res + find_zero(data);
+		}
+		*(unsigned long *)(dest+res) = c;
+		res += sizeof(unsigned long);
+		count -= sizeof(unsigned long);
+		max -= sizeof(unsigned long);
+	}
+
+	while (count) {
+		char c;
+
+		c = src[res];
+		dest[res] = c;
+		if (!c)
+			return res;
+		res++;
+		count--;
+	}
+
+	/* Hit buffer length without finding a NUL; force NUL-termination. */
+	if (res)
+		dest[res-1] = '\0';
+
+	return -E2BIG;
+}
+EXPORT_SYMBOL(strscpy);
+#endif
+
 #ifndef __HAVE_ARCH_STRCAT
 /**
  * strcat - Append one %NUL-terminated string to another
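
From a caller's point of view, the new strscpy() collapses the truncation check into one comparison against -E2BIG. A hypothetical user-space caller exercising those semantics (my_strscpy() is a byte-at-a-time re-implementation for illustration, not the kernel code above):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

static long my_strscpy(char *dest, const char *src, size_t count)
{
	size_t res = 0;

	if (count == 0)
		return -E2BIG;
	while (res < count) {
		if (!(dest[res] = src[res]))
			return (long)res;	/* length, NUL excluded */
		res++;
	}
	dest[count - 1] = '\0';	/* always NUL-terminate on truncation */
	return -E2BIG;		/* the source did not fit */
}

int main(void)
{
	char buf[8];

	printf("%ld\n", my_strscpy(buf, "short", sizeof(buf)));	/* 5 */
	printf("%ld\n", my_strscpy(buf, "far too long", sizeof(buf)));	/* -E2BIG */
	printf("%s\n", buf);	/* "far too": truncated but terminated */
	return 0;
}
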
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 71a8998..312a716 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -394,7 +394,7 @@
 	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (dma < page->dma)
 			continue;
-		if (dma < (page->dma + pool->allocation))
+		if ((dma - page->dma) < pool->allocation)
 			return page;
 	}
 	return NULL;
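
The dmapool hunk rewrites dma < page->dma + pool->allocation as (dma - page->dma) < pool->allocation, which cannot be defeated by the addition wrapping when a page sits near the top of the DMA address space; the earlier dma < page->dma check guarantees the subtraction does not underflow. A small 32-bit demonstration with values chosen to force the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0xfffff000u;	/* page near the top of the space */
	uint32_t alloc = 0x2000;	/* base + alloc wraps around */
	uint32_t dma = 0xfffff800u;	/* really inside [base, base+alloc) */

	/* broken: base + alloc wraps to 0x1000, so the test fails */
	printf("add form: %d\n", dma < base + alloc);
	/* safe: dma >= base is already guaranteed by the earlier check,
	 * so the subtraction cannot underflow
	 */
	printf("sub form: %d\n", (dma - base) < alloc);
	return 0;
}
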
diff --git a/mm/filemap.c b/mm/filemap.c
index 72940fb..1cc5467 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2473,6 +2473,21 @@
 						iov_iter_count(i));
 
 again:
+		/*
+		 * Bring in the user page that we will copy from _first_.
+		 * Otherwise there's a nasty deadlock on copying from the
+		 * same page as we're writing to, without it being marked
+		 * up-to-date.
+		 *
+		 * Not only is this an optimisation, but it is also required
+		 * to check that the address is actually valid, when atomic
+		 * usercopies are used, below.
+		 */
+		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+			status = -EFAULT;
+			break;
+		}
+
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@
 
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
-		/*
-		 * 'page' is now locked.  If we are trying to copy from a
-		 * mapping of 'page' in userspace, the copy might fault and
-		 * would need PageUptodate() to complete.  But, page can not be
-		 * made Uptodate without acquiring the page lock, which we hold.
-		 * Deadlock.  Avoid with pagefault_disable().  Fix up below with
-		 * iov_iter_fault_in_readable().
-		 */
-		pagefault_disable();
+
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-		pagefault_enable();
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@
 			 */
 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_single_seg_count(i));
-			/*
-			 * This is the fallback to recover if the copy from
-			 * userspace above faults.
-			 */
-			if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-				status = -EFAULT;
-				break;
-			}
 			goto again;
 		}
 		pos += copied;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 999fb0a..9cc7734 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3202,6 +3202,14 @@
 			continue;
 
 		/*
+		 * Shared VMAs have their own reserves and do not affect
+		 * MAP_PRIVATE accounting, but it is possible that a shared
+		 * VMA is using the same page, so check and skip such VMAs.
+		 */
+		if (iter_vma->vm_flags & VM_MAYSHARE)
+			continue;
+
+		/*
 		 * Unmap the page from other VMAs without their own reserves.
 		 * They get marked to be SIGKILLed if they fault in these
 		 * areas. This is because a future no-page fault on this VMA
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6ddaeba..d9b5c81 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -644,12 +644,14 @@
 }
 
 /*
+ * Return page count for a single (non-recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both vmstat[] and percpu_counter have thresholds and do periodic
  * synchronization to implement "quick" reads. There is a trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu values can be a performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-				 enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
 	long val = 0;
 	int cpu;
 
+	/* Per-cpu values can be negative, use a signed accumulator */
 	for_each_possible_cpu(cpu)
 		val += per_cpu(memcg->stat->count[idx], cpu);
+	/*
+	 * Summing races with updates, so val may be negative.  Avoid exposing
+	 * transient negative values.
+	 */
+	if (val < 0)
+		val = 0;
 	return val;
 }
 
@@ -1254,7 +1263,7 @@
 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 				continue;
-			pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
 				K(mem_cgroup_read_stat(iter, i)));
 		}
 
@@ -2819,14 +2828,11 @@
 			       enum mem_cgroup_stat_index idx)
 {
 	struct mem_cgroup *iter;
-	long val = 0;
+	unsigned long val = 0;
 
-	/* Per-cpu values can be negative, use a signed accumulator */
 	for_each_mem_cgroup_tree(iter, memcg)
 		val += mem_cgroup_read_stat(iter, idx);
 
-	if (val < 0) /* race ? */
-		val = 0;
 	return val;
 }
 
@@ -3169,7 +3175,7 @@
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
-		seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+		seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
 			   mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
 	}
 
@@ -3194,13 +3200,13 @@
 			   (u64)memsw * PAGE_SIZE);
 
 	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-		long long val = 0;
+		unsigned long long val = 0;
 
 		if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
 			continue;
 		for_each_mem_cgroup_tree(mi, memcg)
 			val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-		seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+		seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
 	}
 
 	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -3381,6 +3387,7 @@
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
 
@@ -4179,7 +4186,6 @@
 	if (memcg_wb_domain_init(memcg, GFP_KERNEL))
 		goto out_free_stat;
 
-	spin_lock_init(&memcg->pcp_counter_lock);
 	return memcg;
 
 out_free_stat:
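
mem_cgroup_read_stat() now keeps a signed accumulator while summing the racy per-cpu counters and clamps the result at zero before returning it as unsigned, so a mid-update snapshot cannot surface as a huge bogus value. The same pattern reduced to plain C:

#include <stdio.h>

/* Sum racy per-cpu deltas; any single snapshot may be negative. */
static unsigned long read_stat(const long *percpu, int ncpus)
{
	long val = 0;
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++)
		val += percpu[cpu];
	/* don't expose a transient negative value to the caller */
	if (val < 0)
		val = 0;
	return (unsigned long)val;
}

int main(void)
{
	long counts[4] = { 10, -12, 1, 0 };	/* mid-update snapshot */

	printf("%lu\n", read_stat(counts, 4));	/* 0, not 2^64 - 1 */
	return 0;
}
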
diff --git a/mm/memory.c b/mm/memory.c
index 9cb2747..deb679c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,6 +2426,8 @@
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
+
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
diff --git a/mm/migrate.c b/mm/migrate.c
index 7452a00..842ecd7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -740,6 +740,15 @@
 	if (PageSwapBacked(page))
 		SetPageSwapBacked(newpage);
 
+	/*
+	 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
+	 * needs newpage's memcg set to transfer memcg dirty page accounting.
+	 * So perform memcg migration in two steps:
+	 * 1. set newpage->mem_cgroup (here)
+	 * 2. clear page->mem_cgroup (below)
+	 */
+	set_page_memcg(newpage, page_memcg(page));
+
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@
 		rc = fallback_migrate_page(mapping, newpage, page, mode);
 
 	if (rc != MIGRATEPAGE_SUCCESS) {
+		set_page_memcg(newpage, NULL);
 		newpage->mapping = NULL;
 	} else {
-		mem_cgroup_migrate(page, newpage, false);
+		set_page_memcg(page, NULL);
 		if (page_was_mapped)
 			remove_migration_ptes(page, newpage);
 		page->mapping = NULL;
diff --git a/mm/readahead.c b/mm/readahead.c
index 60cd846..24682f6 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -89,8 +89,8 @@
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6..4fcc5dd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@
 			size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= kmalloc_size(INDEX_NODE + 1)
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+	/*
+	 * To activate debug pagealloc, off-slab management is a necessary
+	 * requirement. In the early phase of initialization, the small
+	 * sized slabs don't exist yet, so off-slab management would not
+	 * be possible. Hence the check for size >= 256: it guarantees
+	 * that all the necessary small sized slabs are initialized in
+	 * the current slab initialization sequence.
+	 */
+	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+		size >= 256 && cachep->object_size > cache_line_size() &&
+		ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4f5cd97..fbf1448 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1363,15 +1363,16 @@
 
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats())
+	if (refresh_cpu_vm_stats()) {
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
+		schedule_delayed_work_on(smp_processor_id(),
+			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
-	else {
+	} else {
 		/*
 		 * We did not update any counters so the app may be in
 		 * a mode where it does not cause counter updates.
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
index ae1896f..83b19e0 100644
--- a/net/6lowpan/core.c
+++ b/net/6lowpan/core.c
@@ -17,6 +17,11 @@
 
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
 {
+	dev->addr_len = EUI64_ADDR_LEN;
+	dev->type = ARPHRD_6LOWPAN;
+	dev->mtu = IPV6_MIN_MTU;
+	dev->priv_flags |= IFF_NO_QUEUE;
+
 	lowpan_priv(dev)->lltype = lltype;
 }
 EXPORT_SYMBOL(lowpan_netdev_setup);
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 78c8a49..346b5c1 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -49,36 +49,178 @@
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
+
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
-#include <net/af_ieee802154.h>
+
+/* special link-layer handling */
+#include <net/mac802154.h>
 
 #include "nhc.h"
 
+/* Values of fields within the IPHC encoding first byte */
+#define LOWPAN_IPHC_TF_MASK	0x18
+#define LOWPAN_IPHC_TF_00	0x00
+#define LOWPAN_IPHC_TF_01	0x08
+#define LOWPAN_IPHC_TF_10	0x10
+#define LOWPAN_IPHC_TF_11	0x18
+
+#define LOWPAN_IPHC_NH		0x04
+
+#define LOWPAN_IPHC_HLIM_MASK	0x03
+#define LOWPAN_IPHC_HLIM_00	0x00
+#define LOWPAN_IPHC_HLIM_01	0x01
+#define LOWPAN_IPHC_HLIM_10	0x02
+#define LOWPAN_IPHC_HLIM_11	0x03
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID		0x80
+
+#define LOWPAN_IPHC_SAC		0x40
+
+#define LOWPAN_IPHC_SAM_MASK	0x30
+#define LOWPAN_IPHC_SAM_00	0x00
+#define LOWPAN_IPHC_SAM_01	0x10
+#define LOWPAN_IPHC_SAM_10	0x20
+#define LOWPAN_IPHC_SAM_11	0x30
+
+#define LOWPAN_IPHC_M		0x08
+
+#define LOWPAN_IPHC_DAC		0x04
+
+#define LOWPAN_IPHC_DAM_MASK	0x03
+#define LOWPAN_IPHC_DAM_00	0x00
+#define LOWPAN_IPHC_DAM_01	0x01
+#define LOWPAN_IPHC_DAM_10	0x02
+#define LOWPAN_IPHC_DAM_11	0x03
+
+/* ipv6 address based on the mac address
+ * second bit-flip (Universal/Local) is done according to RFC 2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+	((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&	\
+	 (((a)->s6_addr[9])  == (m)[1]) &&		\
+	 (((a)->s6_addr[10]) == (m)[2]) &&		\
+	 (((a)->s6_addr[11]) == (m)[3]) &&		\
+	 (((a)->s6_addr[12]) == (m)[4]) &&		\
+	 (((a)->s6_addr[13]) == (m)[5]) &&		\
+	 (((a)->s6_addr[14]) == (m)[6]) &&		\
+	 (((a)->s6_addr[15]) == (m)[7]))
+
+/* check whether we can compress the IID to 16 bits;
+ * this is possible only for unicast addresses whose first 49 bits are zero.
+ */
+#define lowpan_is_iid_16_bit_compressable(a)	\
+	((((a)->s6_addr16[4]) == 0) &&		\
+	 (((a)->s6_addr[10]) == 0) &&		\
+	 (((a)->s6_addr[11]) == 0xff) &&	\
+	 (((a)->s6_addr[12]) == 0xfe) &&	\
+	 (((a)->s6_addr[13]) == 0))
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a)	\
+	((((a)->s6_addr16[1]) == 0) &&		\
+	 (((a)->s6_addr16[2]) == 0) &&		\
+	 (((a)->s6_addr16[3]) == 0) &&		\
+	 (((a)->s6_addr16[4]) == 0) &&		\
+	 (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a)	\
+	((((a)->s6_addr16[1]) == 0) &&		\
+	 (((a)->s6_addr16[2]) == 0) &&		\
+	 (((a)->s6_addr16[3]) == 0) &&		\
+	 (((a)->s6_addr16[4]) == 0) &&		\
+	 (((a)->s6_addr16[5]) == 0) &&		\
+	 (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a)	\
+	((((a)->s6_addr[1])  == 2) &&		\
+	 (((a)->s6_addr16[1]) == 0) &&		\
+	 (((a)->s6_addr16[2]) == 0) &&		\
+	 (((a)->s6_addr16[3]) == 0) &&		\
+	 (((a)->s6_addr16[4]) == 0) &&		\
+	 (((a)->s6_addr16[5]) == 0) &&		\
+	 (((a)->s6_addr16[6]) == 0) &&		\
+	 (((a)->s6_addr[14]) == 0))
+
+static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
+						const void *lladdr)
+{
+	/* fe:80::XXXX:XXXX:XXXX:XXXX
+	 *        \_________________/
+	 *              hwaddr
+	 */
+	ipaddr->s6_addr[0] = 0xFE;
+	ipaddr->s6_addr[1] = 0x80;
+	memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
+	/* second bit-flip (Universal/Local)
+	 * is done according to RFC 2464
+	 */
+	ipaddr->s6_addr[8] ^= 0x02;
+}
+
+static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
+						 const void *lladdr)
+{
+	const struct ieee802154_addr *addr = lladdr;
+	u8 eui64[EUI64_ADDR_LEN] = { };
+
+	switch (addr->mode) {
+	case IEEE802154_ADDR_LONG:
+		ieee802154_le64_to_be64(eui64, &addr->extended_addr);
+		iphc_uncompress_eui64_lladdr(ipaddr, eui64);
+		break;
+	case IEEE802154_ADDR_SHORT:
+		/* fe:80::ff:fe00:XXXX
+		 *                \__/
+		 *             short_addr
+		 *
+		 * Universe/Local bit is zero.
+		 */
+		ipaddr->s6_addr[0] = 0xFE;
+		ipaddr->s6_addr[1] = 0x80;
+		ipaddr->s6_addr[11] = 0xFF;
+		ipaddr->s6_addr[12] = 0xFE;
+		ieee802154_le16_to_be16(&ipaddr->s6_addr16[7],
+					&addr->short_addr);
+		break;
+	default:
+		/* should never be handled; such frames are filtered by 802154 6lowpan */
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+
 /* Uncompress address function for source and
  * destination address(non-multicast).
  *
- * address_mode is sam value or dam value.
+ * address_mode is the masked sam or dam value
  */
-static int uncompress_addr(struct sk_buff *skb,
-			   struct in6_addr *ipaddr, const u8 address_mode,
-			   const u8 *lladdr, const u8 addr_type,
-			   const u8 addr_len)
+static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
+			   struct in6_addr *ipaddr, u8 address_mode,
+			   const void *lladdr)
 {
 	bool fail;
 
 	switch (address_mode) {
-	case LOWPAN_IPHC_ADDR_00:
+	/* SAM and DAM are the same here */
+	case LOWPAN_IPHC_DAM_00:
 		/* for global link addresses */
 		fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
 		break;
-	case LOWPAN_IPHC_ADDR_01:
+	case LOWPAN_IPHC_SAM_01:
+	case LOWPAN_IPHC_DAM_01:
 		/* fe:80::XXXX:XXXX:XXXX:XXXX */
 		ipaddr->s6_addr[0] = 0xFE;
 		ipaddr->s6_addr[1] = 0x80;
 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
 		break;
-	case LOWPAN_IPHC_ADDR_02:
+	case LOWPAN_IPHC_SAM_10:
+	case LOWPAN_IPHC_DAM_10:
 		/* fe:80::ff:fe00:XXXX */
 		ipaddr->s6_addr[0] = 0xFE;
 		ipaddr->s6_addr[1] = 0x80;
@@ -86,38 +228,16 @@
 		ipaddr->s6_addr[12] = 0xFE;
 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
 		break;
-	case LOWPAN_IPHC_ADDR_03:
+	case LOWPAN_IPHC_SAM_11:
+	case LOWPAN_IPHC_DAM_11:
 		fail = false;
-		switch (addr_type) {
-		case IEEE802154_ADDR_LONG:
-			/* fe:80::XXXX:XXXX:XXXX:XXXX
-			 *        \_________________/
-			 *              hwaddr
-			 */
-			ipaddr->s6_addr[0] = 0xFE;
-			ipaddr->s6_addr[1] = 0x80;
-			memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
-			/* second bit-flip (Universe/Local)
-			 * is done according RFC2464
-			 */
-			ipaddr->s6_addr[8] ^= 0x02;
-			break;
-		case IEEE802154_ADDR_SHORT:
-			/* fe:80::ff:fe00:XXXX
-			 *		  \__/
-			 *	       short_addr
-			 *
-			 * Universe/Local bit is zero.
-			 */
-			ipaddr->s6_addr[0] = 0xFE;
-			ipaddr->s6_addr[1] = 0x80;
-			ipaddr->s6_addr[11] = 0xFF;
-			ipaddr->s6_addr[12] = 0xFE;
-			ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
+		switch (lowpan_priv(dev)->lltype) {
+		case LOWPAN_LLTYPE_IEEE802154:
+			iphc_uncompress_802154_lladdr(ipaddr, lladdr);
 			break;
 		default:
-			pr_debug("Invalid addr_type set\n");
-			return -EINVAL;
+			iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
+			break;
 		}
 		break;
 	default:
@@ -141,24 +261,25 @@
  */
 static int uncompress_context_based_src_addr(struct sk_buff *skb,
 					     struct in6_addr *ipaddr,
-					     const u8 sam)
+					     u8 address_mode)
 {
-	switch (sam) {
-	case LOWPAN_IPHC_ADDR_00:
+	switch (address_mode) {
+	case LOWPAN_IPHC_SAM_00:
 		/* unspec address ::
 		 * Do nothing, address is already ::
 		 */
 		break;
-	case LOWPAN_IPHC_ADDR_01:
+	case LOWPAN_IPHC_SAM_01:
 		/* TODO */
-	case LOWPAN_IPHC_ADDR_02:
+	case LOWPAN_IPHC_SAM_10:
 		/* TODO */
-	case LOWPAN_IPHC_ADDR_03:
+	case LOWPAN_IPHC_SAM_11:
 		/* TODO */
-		netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+		netdev_warn(skb->dev, "SAM value 0x%x not supported\n",
+			    address_mode);
 		return -EINVAL;
 	default:
-		pr_debug("Invalid sam value: 0x%x\n", sam);
+		pr_debug("Invalid sam value: 0x%x\n", address_mode);
 		return -EINVAL;
 	}
 
@@ -174,11 +295,11 @@
  */
 static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
 					     struct in6_addr *ipaddr,
-					     const u8 dam)
+					     u8 address_mode)
 {
 	bool fail;
 
-	switch (dam) {
+	switch (address_mode) {
 	case LOWPAN_IPHC_DAM_00:
 		/* 00:  128 bits.  The full address
 		 * is carried in-line.
@@ -210,7 +331,7 @@
 		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
 		break;
 	default:
-		pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+		pr_debug("DAM value has a wrong value: 0x%x\n", address_mode);
 		return -EINVAL;
 	}
 
@@ -225,77 +346,142 @@
 	return 0;
 }
 
-/* TTL uncompression values */
-static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
+/* get the ecn value from the iphc tf format and set it in the ipv6hdr */
+static inline void lowpan_iphc_tf_set_ecn(struct ipv6hdr *hdr, const u8 *tf)
+{
+	/* get the two higher bits which are ecn */
+	u8 ecn = tf[0] & 0xc0;
 
-int
-lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
-			 const u8 *saddr, const u8 saddr_type,
-			 const u8 saddr_len, const u8 *daddr,
-			 const u8 daddr_type, const u8 daddr_len,
-			 u8 iphc0, u8 iphc1)
+	/* ECN takes 0x30 in hdr->flow_lbl[0] */
+	hdr->flow_lbl[0] |= (ecn >> 2);
+}
+
+/* get the dscp values from iphc tf format and set it to ipv6hdr */
+static inline void lowpan_iphc_tf_set_dscp(struct ipv6hdr *hdr, const u8 *tf)
+{
+	/* DSCP is at place after ECN */
+	u8 dscp = tf[0] & 0x3f;
+
+	/* The four highest bits need to be set at hdr->priority */
+	hdr->priority |= ((dscp & 0x3c) >> 2);
+	/* The two lower bits is part of hdr->flow_lbl[0] */
+	hdr->flow_lbl[0] |= ((dscp & 0x03) << 6);
+}
+
+/* get the flow label value from the iphc tf format and set it in the ipv6hdr */
+static inline void lowpan_iphc_tf_set_lbl(struct ipv6hdr *hdr, const u8 *lbl)
+{
+	/* The flow label always starts in the lower nibble of flow_lbl[0]
+	 * and is followed by two more bytes. Inside the inline data the
+	 * flow_lbl position can differ, which is handled via the lbl
+	 * pointer: e.g. in case "01" vs "00" the traffic class is shifted
+	 * by 8 bits, and the different lbl pointer accounts for that.
+	 *
+	 * The higher nibble of flow_lbl[0] is part of DSCP + ECN.
+	 */
+	hdr->flow_lbl[0] |= lbl[0] & 0x0f;
+	memcpy(&hdr->flow_lbl[1], &lbl[1], 2);
+}
+
+/* lowpan_iphc_tf_decompress - decompress the traffic class.
+ *	This function returns zero on success or a negative value on
+ *	failure.
+ */
+static int lowpan_iphc_tf_decompress(struct sk_buff *skb, struct ipv6hdr *hdr,
+				     u8 val)
+{
+	u8 tf[4];
+
+	/* Traffic Class and Flow Label */
+	switch (val) {
+	case LOWPAN_IPHC_TF_00:
+		/* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */
+		if (lowpan_fetch_skb(skb, tf, 4))
+			return -EINVAL;
+
+		/*                      1                   2                   3
+		 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 * |ECN|   DSCP    |  rsv  |             Flow Label                |
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 */
+		lowpan_iphc_tf_set_ecn(hdr, tf);
+		lowpan_iphc_tf_set_dscp(hdr, tf);
+		lowpan_iphc_tf_set_lbl(hdr, &tf[1]);
+		break;
+	case LOWPAN_IPHC_TF_01:
+		/* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided. */
+		if (lowpan_fetch_skb(skb, tf, 3))
+			return -EINVAL;
+
+		/*                     1                   2
+		 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 * |ECN|rsv|             Flow Label                |
+		 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		 */
+		lowpan_iphc_tf_set_ecn(hdr, tf);
+		lowpan_iphc_tf_set_lbl(hdr, &tf[0]);
+		break;
+	case LOWPAN_IPHC_TF_10:
+		/* ECN + DSCP (1 byte), Flow Label is elided. */
+		if (lowpan_fetch_skb(skb, tf, 1))
+			return -EINVAL;
+
+		/*  0 1 2 3 4 5 6 7
+		 * +-+-+-+-+-+-+-+-+
+		 * |ECN|   DSCP    |
+		 * +-+-+-+-+-+-+-+-+
+		 */
+		lowpan_iphc_tf_set_ecn(hdr, tf);
+		lowpan_iphc_tf_set_dscp(hdr, tf);
+		break;
+	case LOWPAN_IPHC_TF_11:
+		/* Traffic Class and Flow Label are elided */
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* TTL uncompression values */
+static const u8 lowpan_ttl_values[] = {
+	[LOWPAN_IPHC_HLIM_01] = 1,
+	[LOWPAN_IPHC_HLIM_10] = 64,
+	[LOWPAN_IPHC_HLIM_11] = 255,
+};
+
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+			     const void *daddr, const void *saddr)
 {
 	struct ipv6hdr hdr = {};
-	u8 tmp, num_context = 0;
+	u8 iphc0, iphc1;
 	int err;
 
 	raw_dump_table(__func__, "raw skb data dump uncompressed",
 		       skb->data, skb->len);
 
+	if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
+	    lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
+		return -EINVAL;
+
 	/* check whether the CID flag is set */
-	if (iphc1 & LOWPAN_IPHC_CID) {
-		pr_debug("CID flag is set, increase header with one\n");
-		if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
-			return -EINVAL;
-	}
+	if (iphc1 & LOWPAN_IPHC_CID)
+		return -ENOTSUPP;
 
 	hdr.version = 6;
 
-	/* Traffic Class and Flow Label */
-	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-	/* Traffic Class and FLow Label carried in-line
-	 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
-	 */
-	case 0: /* 00b */
-		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			return -EINVAL;
-
-		memcpy(&hdr.flow_lbl, &skb->data[0], 3);
-		skb_pull(skb, 3);
-		hdr.priority = ((tmp >> 2) & 0x0f);
-		hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
-					(hdr.flow_lbl[0] & 0x0f);
-		break;
-	/* Traffic class carried in-line
-	 * ECN + DSCP (1 byte), Flow Label is elided
-	 */
-	case 2: /* 10b */
-		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			return -EINVAL;
-
-		hdr.priority = ((tmp >> 2) & 0x0f);
-		hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
-		break;
-	/* Flow Label carried in-line
-	 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
-	 */
-	case 1: /* 01b */
-		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
-			return -EINVAL;
-
-		hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
-		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
-		skb_pull(skb, 2);
-		break;
-	/* Traffic Class and Flow Label are elided */
-	case 3: /* 11b */
-		break;
-	default:
-		break;
-	}
+	err = lowpan_iphc_tf_decompress(skb, &hdr,
+					iphc0 & LOWPAN_IPHC_TF_MASK);
+	if (err < 0)
+		return err;
 
 	/* Next Header */
-	if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+	if (!(iphc0 & LOWPAN_IPHC_NH)) {
 		/* Next header is carried inline */
 		if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
 			return -EINVAL;
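
The TF helpers above shuffle bits between the IPv6 layout, where the traffic class is split across hdr->priority (high DSCP bits) and flow_lbl[0] (low DSCP bits plus ECN), and the IPHC byte laid out as ECN(2) | DSCP(6). A stand-alone round-trip of that shuffling, mirroring lowpan_iphc_get_tc() and the tf_set_* helpers (field widths simplified to plain bytes):

#include <stdint.h>
#include <stdio.h>

/* Build the IPHC TC byte, ECN in the top two bits, DSCP below. */
static uint8_t tc_from_ipv6(uint8_t priority, uint8_t flow_lbl0)
{
	uint8_t dscp = (uint8_t)((priority << 2) | ((flow_lbl0 & 0xc0) >> 6));
	uint8_t ecn = flow_lbl0 & 0x30;

	return (uint8_t)((ecn << 2) | dscp);
}

/* Scatter the TC byte back into the IPv6 header fields. */
static void tc_to_ipv6(uint8_t tc, uint8_t *priority, uint8_t *flow_lbl0)
{
	*priority = (uint8_t)((tc & 0x3c) >> 2);	/* high DSCP bits */
	*flow_lbl0 = (uint8_t)(((tc & 0x03) << 6) |	/* low DSCP bits */
			       ((tc & 0xc0) >> 2));	/* ECN into 0x30 */
}

int main(void)
{
	uint8_t prio = 0x0b, lbl0 = 0x70, tc, p2, l2;

	tc = tc_from_ipv6(prio, lbl0);
	tc_to_ipv6(tc, &p2, &l2);
	printf("tc=0x%02x prio=0x%02x lbl0=0x%02x\n", tc, p2, l2);
	/* round-trips: prio and the DSCP/ECN bits of flow_lbl[0] survive */
	return 0;
}
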
@@ -305,35 +491,30 @@
 	}
 
 	/* Hop Limit */
-	if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
-		hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
+	if ((iphc0 & LOWPAN_IPHC_HLIM_MASK) != LOWPAN_IPHC_HLIM_00) {
+		hdr.hop_limit = lowpan_ttl_values[iphc0 & LOWPAN_IPHC_HLIM_MASK];
 	} else {
 		if (lowpan_fetch_skb(skb, &hdr.hop_limit,
 				     sizeof(hdr.hop_limit)))
 			return -EINVAL;
 	}
 
-	/* Extract SAM to the tmp variable */
-	tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
-
 	if (iphc1 & LOWPAN_IPHC_SAC) {
 		/* Source address context based uncompression */
 		pr_debug("SAC bit is set. Handle context based source address.\n");
-		err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp);
+		err = uncompress_context_based_src_addr(skb, &hdr.saddr,
+							iphc1 & LOWPAN_IPHC_SAM_MASK);
 	} else {
 		/* Source address uncompression */
 		pr_debug("source address stateless compression\n");
-		err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
-				      saddr_type, saddr_len);
+		err = uncompress_addr(skb, dev, &hdr.saddr,
+				      iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
 	}
 
 	/* Check on error of previous branch */
 	if (err)
 		return -EINVAL;
 
-	/* Extract DAM to the tmp variable */
-	tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
-
 	/* check for Multicast Compression */
 	if (iphc1 & LOWPAN_IPHC_M) {
 		if (iphc1 & LOWPAN_IPHC_DAC) {
@@ -341,22 +522,22 @@
 			/* TODO: implement this */
 		} else {
 			err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
-								tmp);
+								iphc1 & LOWPAN_IPHC_DAM_MASK);
 
 			if (err)
 				return -EINVAL;
 		}
 	} else {
-		err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
-				      daddr_type, daddr_len);
+		err = uncompress_addr(skb, dev, &hdr.daddr,
+				      iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
 		pr_debug("dest: stateless compression mode %d dest %pI6c\n",
-			 tmp, &hdr.daddr);
+			 iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
 		if (err)
 			return -EINVAL;
 	}
 
 	/* Next header data uncompression */
-	if (iphc0 & LOWPAN_IPHC_NH_C) {
+	if (iphc0 & LOWPAN_IPHC_NH) {
 		err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
 		if (err < 0)
 			return err;
@@ -397,42 +578,176 @@
 }
 EXPORT_SYMBOL_GPL(lowpan_header_decompress);
 
-static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
-				  const struct in6_addr *ipaddr,
-				  const unsigned char *lladdr)
+static const u8 lowpan_iphc_dam_to_sam_value[] = {
+	[LOWPAN_IPHC_DAM_00] = LOWPAN_IPHC_SAM_00,
+	[LOWPAN_IPHC_DAM_01] = LOWPAN_IPHC_SAM_01,
+	[LOWPAN_IPHC_DAM_10] = LOWPAN_IPHC_SAM_10,
+	[LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
+};
+
+static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct in6_addr *ipaddr,
+				  const unsigned char *lladdr, bool sam)
 {
-	u8 val = 0;
+	u8 dam = LOWPAN_IPHC_DAM_00;
 
 	if (is_addr_mac_addr_based(ipaddr, lladdr)) {
-		val = 3; /* 0-bits */
+		dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
 		pr_debug("address compression 0 bits\n");
 	} else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
 		/* compress IID to 16 bits xxxx::XXXX */
 		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
-		val = 2; /* 16-bits */
+		dam = LOWPAN_IPHC_DAM_10; /* 16-bits */
 		raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
 				*hc_ptr - 2, 2);
 	} else {
 		/* do not compress IID => xxxx::IID */
 		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
-		val = 1; /* 64-bits */
+		dam = LOWPAN_IPHC_DAM_01; /* 64-bits */
 		raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
 				*hc_ptr - 8, 8);
 	}
 
-	return rol8(val, shift);
+	if (sam)
+		return lowpan_iphc_dam_to_sam_value[dam];
+	else
+		return dam;
 }
 
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-			   unsigned short type, const void *_daddr,
-			   const void *_saddr, unsigned int len)
+/* lowpan_iphc_get_tc - get the ECN + DSCP fields in hc format */
+static inline u8 lowpan_iphc_get_tc(const struct ipv6hdr *hdr)
 {
-	u8 tmp, iphc0, iphc1, *hc_ptr;
+	u8 dscp, ecn;
+
+	/* hdr->priority contains the higher bits of dscp, lower are part of
+	 * flow_lbl[0]. Note that ECN and DSCP are swapped in the ipv6 hdr.
+	 */
+	dscp = (hdr->priority << 2) | ((hdr->flow_lbl[0] & 0xc0) >> 6);
+	/* ECN is at the two lower bits from first nibble of flow_lbl[0] */
+	ecn = (hdr->flow_lbl[0] & 0x30);
+	/* for pretty debug output, also shift ecn to get the ecn value */
+	pr_debug("ecn 0x%02x dscp 0x%02x\n", ecn >> 4, dscp);
+	/* ECN is at 0x30 now, shift it to get ECN + DSCP */
+	return (ecn << 2) | dscp;
+}
+
+/* lowpan_iphc_is_flow_lbl_zero - check if flow label is zero */
+static inline bool lowpan_iphc_is_flow_lbl_zero(const struct ipv6hdr *hdr)
+{
+	return ((!(hdr->flow_lbl[0] & 0x0f)) &&
+		!hdr->flow_lbl[1] && !hdr->flow_lbl[2]);
+}
+
+/* lowpan_iphc_tf_compress - compress the traffic class carried in the
+ *	ipv6hdr. Returns the TF format identifier which was used.
+ */
+static u8 lowpan_iphc_tf_compress(u8 **hc_ptr, const struct ipv6hdr *hdr)
+{
+	/* get ecn dscp data in byte format as: ECN(hi) + DSCP(lo) */
+	u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;
+
+	/* print the traffic class in hc format */
+	pr_debug("tc 0x%02x\n", tc);
+
+	if (lowpan_iphc_is_flow_lbl_zero(hdr)) {
+		if (!tc) {
+			/* 11:  Traffic Class and Flow Label are elided. */
+			val = LOWPAN_IPHC_TF_11;
+		} else {
+			/* 10:  ECN + DSCP (1 byte), Flow Label is elided.
+			 *
+			 *  0 1 2 3 4 5 6 7
+			 * +-+-+-+-+-+-+-+-+
+			 * |ECN|   DSCP    |
+			 * +-+-+-+-+-+-+-+-+
+			 */
+			lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));
+			val = LOWPAN_IPHC_TF_10;
+		}
+	} else {
+		/* check if dscp is zero; it sits after the first two (ECN) bits */
+		if (!(tc & 0x3f)) {
+			/* 01:  ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+			 *
+			 *                     1                   2
+			 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+			 * |ECN|rsv|             Flow Label                |
+			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+			 */
+			memcpy(&tf[0], &hdr->flow_lbl[0], 3);
+			/* zero the highest 4 bits, they contain DSCP + ECN */
+			tf[0] &= ~0xf0;
+			/* set ECN */
+			tf[0] |= (tc & 0xc0);
+
+			lowpan_push_hc_data(hc_ptr, tf, 3);
+			val = LOWPAN_IPHC_TF_01;
+		} else {
+			/* 00:  ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+			 *
+			 *                      1                   2                   3
+			 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+			 * |ECN|   DSCP    |  rsv  |             Flow Label                |
+			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+			 */
+			memcpy(&tf[0], &tc, sizeof(tc));
+			/* highest nibble of flow_lbl[0] is part of DSCP + ECN
+			 * which will be the 4-bit pad and will be filled with
+			 * zeros afterwards.
+			 */
+			memcpy(&tf[1], &hdr->flow_lbl[0], 3);
+			/* zero the 4-bit pad, which is reserved */
+			tf[1] &= ~0xf0;
+
+			lowpan_push_hc_data(hc_ptr, tf, 4);
+			val = LOWPAN_IPHC_TF_00;
+		}
+	}
+
+	return val;
+}
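
Net effect of the selection above: TF_11 pushes nothing inline, TF_10 pushes
one byte (ECN + DSCP), TF_01 pushes three bytes (ECN + flow label) and TF_00
pushes four (ECN + DSCP + flow label), matching the diagrams in each branch.
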
+
+static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
+					  const struct in6_addr *ipaddr)
+{
+	u8 val;
+
+	if (lowpan_is_mcast_addr_compressable8(ipaddr)) {
+		pr_debug("compressed to 1 octet\n");
+		/* use last byte */
+		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[15], 1);
+		val = LOWPAN_IPHC_DAM_11;
+	} else if (lowpan_is_mcast_addr_compressable32(ipaddr)) {
+		pr_debug("compressed to 4 octets\n");
+		/* second byte + the last three */
+		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
+		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[13], 3);
+		val = LOWPAN_IPHC_DAM_10;
+	} else if (lowpan_is_mcast_addr_compressable48(ipaddr)) {
+		pr_debug("compressed to 6 octets\n");
+		/* second byte + the last five */
+		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
+		lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[11], 5);
+		val = LOWPAN_IPHC_DAM_01;
+	} else {
+		pr_debug("using full address\n");
+		lowpan_push_hc_data(hc_ptr, ipaddr->s6_addr, 16);
+		val = LOWPAN_IPHC_DAM_00;
+	}
+
+	return val;
+}
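
For illustration, a minimal userspace sketch of the 8-bit case, assuming the
RFC 6282 pattern ff02::00XX for lowpan_is_mcast_addr_compressable8() (the
real helper is defined elsewhere):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* assumed pattern: ff02:: with only the last byte non-zero */
	static int mcast_compressable8(const uint8_t a[16])
	{
		static const uint8_t zeros[13];

		return a[0] == 0xff && a[1] == 0x02 &&
		       !memcmp(&a[2], zeros, sizeof(zeros));
	}

	int main(void)
	{
		uint8_t all_nodes[16] = { 0xff, 0x02, [15] = 0x01 };

		/* prints "1": ff02::1 would go on the wire as the single
		 * byte 0x01, encoded as LOWPAN_IPHC_DAM_11
		 */
		printf("%d\n", mcast_compressable8(all_nodes));
		return 0;
	}
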
+
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+			   const void *daddr, const void *saddr)
+{
+	u8 iphc0, iphc1, *hc_ptr;
 	struct ipv6hdr *hdr;
-	u8 head[100] = {};
+	u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {};
 	int ret, addr_type;
 
-	if (type != ETH_P_IPV6)
+	if (skb->protocol != htons(ETH_P_IPV6))
 		return -EINVAL;
 
 	hdr = ipv6_hdr(skb);
@@ -456,63 +771,26 @@
 
 	/* TODO: context lookup */
 
-	raw_dump_inline(__func__, "saddr",
-			(unsigned char *)_saddr, IEEE802154_ADDR_LEN);
-	raw_dump_inline(__func__, "daddr",
-			(unsigned char *)_daddr, IEEE802154_ADDR_LEN);
+	raw_dump_inline(__func__, "saddr", saddr, EUI64_ADDR_LEN);
+	raw_dump_inline(__func__, "daddr", daddr, EUI64_ADDR_LEN);
 
 	raw_dump_table(__func__, "sending raw skb network uncompressed packet",
 		       skb->data, skb->len);
 
-	/* Traffic class, flow label
-	 * If flow label is 0, compress it. If traffic class is 0, compress it
-	 * We have to process both in the same time as the offset of traffic
-	 * class depends on the presence of version and flow label
-	 */
-
-	/* hc format of TC is ECN | DSCP , original one is DSCP | ECN */
-	tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
-	tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
-
-	if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
-	    (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
-		/* flow label can be compressed */
-		iphc0 |= LOWPAN_IPHC_FL_C;
-		if ((hdr->priority == 0) &&
-		    ((hdr->flow_lbl[0] & 0xF0) == 0)) {
-			/* compress (elide) all */
-			iphc0 |= LOWPAN_IPHC_TC_C;
-		} else {
-			/* compress only the flow label */
-			*hc_ptr = tmp;
-			hc_ptr += 1;
-		}
-	} else {
-		/* Flow label cannot be compressed */
-		if ((hdr->priority == 0) &&
-		    ((hdr->flow_lbl[0] & 0xF0) == 0)) {
-			/* compress only traffic class */
-			iphc0 |= LOWPAN_IPHC_TC_C;
-			*hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
-			memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2);
-			hc_ptr += 3;
-		} else {
-			/* compress nothing */
-			memcpy(hc_ptr, hdr, 4);
-			/* replace the top byte with new ECN | DSCP format */
-			*hc_ptr = tmp;
-			hc_ptr += 4;
-		}
-	}
+	/* Traffic Class, Flow Label compression */
+	iphc0 |= lowpan_iphc_tf_compress(&hc_ptr, hdr);
 
 	/* NOTE: payload length is always compressed */
 
 	/* Check if we provide the nhc format for nexthdr and compression
 	 * functionality. If not, nexthdr is handled inline and not compressed.
 	 */
-	ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0);
-	if (ret < 0)
-		return ret;
+	ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr);
+	if (ret == -ENOENT)
+		lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
+				    sizeof(hdr->nexthdr));
+	else
+		iphc0 |= LOWPAN_IPHC_NH;
 
 	/* Hop limit
 	 * if 1:   compress, encoding is 01
@@ -522,13 +800,13 @@
 	 */
 	switch (hdr->hop_limit) {
 	case 1:
-		iphc0 |= LOWPAN_IPHC_TTL_1;
+		iphc0 |= LOWPAN_IPHC_HLIM_01;
 		break;
 	case 64:
-		iphc0 |= LOWPAN_IPHC_TTL_64;
+		iphc0 |= LOWPAN_IPHC_HLIM_10;
 		break;
 	case 255:
-		iphc0 |= LOWPAN_IPHC_TTL_255;
+		iphc0 |= LOWPAN_IPHC_HLIM_11;
 		break;
 	default:
 		lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
@@ -542,9 +820,8 @@
 		iphc1 |= LOWPAN_IPHC_SAC;
 	} else {
 		if (addr_type & IPV6_ADDR_LINKLOCAL) {
-			iphc1 |= lowpan_compress_addr_64(&hc_ptr,
-							 LOWPAN_IPHC_SAM_BIT,
-							 &hdr->saddr, _saddr);
+			iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->saddr,
+							 saddr, true);
 			pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
 				 &hdr->saddr, iphc1);
 		} else {
@@ -558,38 +835,12 @@
 	if (addr_type & IPV6_ADDR_MULTICAST) {
 		pr_debug("destination address is multicast: ");
 		iphc1 |= LOWPAN_IPHC_M;
-		if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
-			pr_debug("compressed to 1 octet\n");
-			iphc1 |= LOWPAN_IPHC_DAM_11;
-			/* use last byte */
-			lowpan_push_hc_data(&hc_ptr,
-					    &hdr->daddr.s6_addr[15], 1);
-		} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
-			pr_debug("compressed to 4 octets\n");
-			iphc1 |= LOWPAN_IPHC_DAM_10;
-			/* second byte + the last three */
-			lowpan_push_hc_data(&hc_ptr,
-					    &hdr->daddr.s6_addr[1], 1);
-			lowpan_push_hc_data(&hc_ptr,
-					    &hdr->daddr.s6_addr[13], 3);
-		} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
-			pr_debug("compressed to 6 octets\n");
-			iphc1 |= LOWPAN_IPHC_DAM_01;
-			/* second byte + the last five */
-			lowpan_push_hc_data(&hc_ptr,
-					    &hdr->daddr.s6_addr[1], 1);
-			lowpan_push_hc_data(&hc_ptr,
-					    &hdr->daddr.s6_addr[11], 5);
-		} else {
-			pr_debug("using full address\n");
-			iphc1 |= LOWPAN_IPHC_DAM_00;
-			lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
-		}
+		iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr, &hdr->daddr);
 	} else {
 		if (addr_type & IPV6_ADDR_LINKLOCAL) {
 			/* TODO: context lookup */
-			iphc1 |= lowpan_compress_addr_64(&hc_ptr,
-				LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
+			iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->daddr,
+							 daddr, false);
 			pr_debug("dest address unicast link-local %pI6c "
 				 "iphc1 0x%02x\n", &hdr->daddr, iphc1);
 		} else {
@@ -599,7 +850,7 @@
 	}
 
 	/* next header compression */
-	if (iphc0 & LOWPAN_IPHC_NH_C) {
+	if (iphc0 & LOWPAN_IPHC_NH) {
 		ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
 		if (ret < 0)
 			return ret;
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
index fd20fc5..7008d53 100644
--- a/net/6lowpan/nhc.c
+++ b/net/6lowpan/nhc.c
@@ -95,23 +95,20 @@
 }
 
 int lowpan_nhc_check_compression(struct sk_buff *skb,
-				 const struct ipv6hdr *hdr, u8 **hc_ptr,
-				 u8 *iphc0)
+				 const struct ipv6hdr *hdr, u8 **hc_ptr)
 {
 	struct lowpan_nhc *nhc;
+	int ret = 0;
 
 	spin_lock_bh(&lowpan_nhc_lock);
 
 	nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
-	if (nhc && nhc->compress)
-		*iphc0 |= LOWPAN_IPHC_NH_C;
-	else
-		lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
-				    sizeof(hdr->nexthdr));
+	if (!(nhc && nhc->compress))
+		ret = -ENOENT;
 
 	spin_unlock_bh(&lowpan_nhc_lock);
 
-	return 0;
+	return ret;
 }
 
 int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
@@ -157,7 +154,8 @@
 	return ret;
 }
 
-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+				const struct net_device *dev,
 				struct ipv6hdr *hdr)
 {
 	struct lowpan_nhc *nhc;
diff --git a/net/6lowpan/nhc.h b/net/6lowpan/nhc.h
index ed44938..8030414 100644
--- a/net/6lowpan/nhc.h
+++ b/net/6lowpan/nhc.h
@@ -8,8 +8,6 @@
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
 
-#define LOWPAN_NHC_MAX_ID_LEN	1
-
 /**
  * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct
  *
@@ -88,19 +86,16 @@
 
 /**
  * lowpan_nhc_check_compression - checks if we support compression format. If
- *	we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be
- *	set. If we don't support nexthdr will be added as inline data to the
- *	6LoWPAN header.
+ *	we support the nhc for this nexthdr value, the function returns 0.
+ *	If we don't, it returns -ENOENT.
  *
  * @skb: skb of 6LoWPAN header to read nhc and replace header.
  * @hdr: ipv6hdr to check the nexthdr value
  * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
  *	    replaced header.
- * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
  */
 int lowpan_nhc_check_compression(struct sk_buff *skb,
-				 const struct ipv6hdr *hdr, u8 **hc_ptr,
-				 u8 *iphc0);
+				 const struct ipv6hdr *hdr, u8 **hc_ptr);
 
 /**
  * lowpan_nhc_do_compression - calling compress callback for nhc
@@ -121,7 +116,8 @@
  * @dev: netdevice for print logging information.
  * @hdr: ipv6hdr for setting nexthdr value.
  */
-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+				const struct net_device *dev,
 				struct ipv6hdr *hdr);
 
 /**
diff --git a/net/6lowpan/nhc_udp.c b/net/6lowpan/nhc_udp.c
index 72d0b57..69537a2 100644
--- a/net/6lowpan/nhc_udp.c
+++ b/net/6lowpan/nhc_udp.c
@@ -17,7 +17,27 @@
 
 #include "nhc.h"
 
-#define LOWPAN_NHC_UDP_IDLEN	1
+#define LOWPAN_NHC_UDP_MASK		0xF8
+#define LOWPAN_NHC_UDP_ID		0xF0
+#define LOWPAN_NHC_UDP_IDLEN		1
+
+#define LOWPAN_NHC_UDP_4BIT_PORT	0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK	0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT	0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK	0xFF00
+
+/* values for port compression, _with checksum_, i.e. bit 5 set to 0 */
+
+/* all inline */
+#define LOWPAN_NHC_UDP_CS_P_00	0xF0
+/* source 16bit inline, dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_01	0xF1
+/* source = 0xF0 + 8bit inline, dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10	0xF2
+/* source & dest = 0xF0B + 4bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11	0xF3
+/* checksum elided */
+#define LOWPAN_NHC_UDP_CS_C	0x04
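
A standalone userspace sketch (hypothetical helper, not part of the patch) of
how a source/destination port pair maps onto the CS_P modes defined above:

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t udp_port_mode(uint16_t src, uint16_t dst)
	{
		if ((src & 0xfff0) == 0xf0b0 && (dst & 0xfff0) == 0xf0b0)
			return 0xf3;	/* CS_P_11: both ports to 4 bits */
		if ((dst & 0xff00) == 0xf000)
			return 0xf1;	/* CS_P_01: dest to 8 bits */
		if ((src & 0xff00) == 0xf000)
			return 0xf2;	/* CS_P_10: source to 8 bits */
		return 0xf0;		/* CS_P_00: both ports inline */
	}

	int main(void)
	{
		/* both ports match the 4-bit pattern: prints "mode 0xf3" */
		printf("mode 0x%02x\n", udp_port_mode(0xf0b1, 0xf0b2));
		return 0;
	}
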
 
 static int udp_uncompress(struct sk_buff *skb, size_t needed)
 {
diff --git a/net/Kconfig b/net/Kconfig
index 7021c1b..127da94 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,7 @@
 source "net/mpls/Kconfig"
 source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
+source "net/l3mdev/Kconfig"
 
 config RPS
 	bool
diff --git a/net/Makefile b/net/Makefile
index 3995613..a5d0409 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -74,3 +74,6 @@
 ifneq ($(CONFIG_NET_SWITCHDEV),)
 obj-y				+= switchdev/
 endif
+ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
+obj-y				+= l3mdev/
+endif
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 131e79c..9e9cca3 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -21,8 +21,6 @@
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
 
-#include <net/af_ieee802154.h> /* to get the address type */
-
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
@@ -35,7 +33,6 @@
 static struct dentry *lowpan_control_debugfs;
 
 #define IFACE_NAME_TEMPLATE "bt%d"
-#define EUI64_ADDR_LEN 8
 
 struct skb_cb {
 	struct in6_addr addr;
@@ -266,14 +263,13 @@
 	if (!skb_cp)
 		return NET_RX_DROP;
 
-	return netif_rx(skb_cp);
+	return netif_rx_ni(skb_cp);
 }
 
 static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
 			   struct l2cap_chan *chan)
 {
 	const u8 *saddr, *daddr;
-	u8 iphc0, iphc1;
 	struct lowpan_dev *dev;
 	struct lowpan_peer *peer;
 
@@ -288,22 +284,7 @@
 	saddr = peer->eui64_addr;
 	daddr = dev->netdev->dev_addr;
 
-	/* at least two bytes will be used for the encoding */
-	if (skb->len < 2)
-		return -EINVAL;
-
-	if (lowpan_fetch_skb_u8(skb, &iphc0))
-		return -EINVAL;
-
-	if (lowpan_fetch_skb_u8(skb, &iphc1))
-		return -EINVAL;
-
-	return lowpan_header_decompress(skb, netdev,
-					saddr, IEEE802154_ADDR_LONG,
-					EUI64_ADDR_LEN, daddr,
-					IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
-					iphc0, iphc1);
-
+	return lowpan_header_decompress(skb, netdev, daddr, saddr);
 }
 
 static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
@@ -315,15 +296,17 @@
 	if (!netif_running(dev))
 		goto drop;
 
-	if (dev->type != ARPHRD_6LOWPAN)
+	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
 		goto drop;
 
+	skb_reset_network_header(skb);
+
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
 		goto drop;
 
 	/* check that it's our buffer */
-	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+	if (lowpan_is_ipv6(*skb_network_header(skb))) {
 		/* Copy the packet so that the IPv6 header is
 		 * properly aligned.
 		 */
@@ -335,7 +318,6 @@
 		local_skb->protocol = htons(ETH_P_IPV6);
 		local_skb->pkt_type = PACKET_HOST;
 
-		skb_reset_network_header(local_skb);
 		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
 		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
@@ -348,38 +330,34 @@
 
 		consume_skb(local_skb);
 		consume_skb(skb);
-	} else {
-		switch (skb->data[0] & 0xe0) {
-		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
-			local_skb = skb_clone(skb, GFP_ATOMIC);
-			if (!local_skb)
-				goto drop;
+	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
+		local_skb = skb_clone(skb, GFP_ATOMIC);
+		if (!local_skb)
+			goto drop;
 
-			ret = iphc_decompress(local_skb, dev, chan);
-			if (ret < 0) {
-				kfree_skb(local_skb);
-				goto drop;
-			}
-
-			local_skb->protocol = htons(ETH_P_IPV6);
-			local_skb->pkt_type = PACKET_HOST;
-			local_skb->dev = dev;
-
-			if (give_skb_to_upper(local_skb, dev)
-					!= NET_RX_SUCCESS) {
-				kfree_skb(local_skb);
-				goto drop;
-			}
-
-			dev->stats.rx_bytes += skb->len;
-			dev->stats.rx_packets++;
-
-			consume_skb(local_skb);
-			consume_skb(skb);
-			break;
-		default:
-			break;
+		ret = iphc_decompress(local_skb, dev, chan);
+		if (ret < 0) {
+			kfree_skb(local_skb);
+			goto drop;
 		}
+
+		local_skb->protocol = htons(ETH_P_IPV6);
+		local_skb->pkt_type = PACKET_HOST;
+		local_skb->dev = dev;
+
+		if (give_skb_to_upper(local_skb, dev)
+				!= NET_RX_SUCCESS) {
+			kfree_skb(local_skb);
+			goto drop;
+		}
+
+		dev->stats.rx_bytes += skb->len;
+		dev->stats.rx_packets++;
+
+		consume_skb(local_skb);
+		consume_skb(skb);
+	} else {
+		goto drop;
 	}
 
 	return NET_RX_SUCCESS;
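
(For context, lowpan_is_ipv6() and lowpan_is_iphc() presumably wrap the
checks that were open-coded before: a dispatch byte of 0x41 for uncompressed
IPv6 and the 011xxxxx pattern, mask 0xe0, for IPHC, as the removed lines
above suggest.)
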
@@ -493,8 +471,7 @@
 		status = 1;
 	}
 
-	lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
-			       dev->netdev->dev_addr, skb->len);
+	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);
 
 	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
 	if (err < 0)
@@ -674,13 +651,8 @@
 
 static void netdev_setup(struct net_device *dev)
 {
-	dev->addr_len		= EUI64_ADDR_LEN;
-	dev->type		= ARPHRD_6LOWPAN;
-
 	dev->hard_header_len	= 0;
 	dev->needed_tailroom	= 0;
-	dev->mtu		= IPV6_MIN_MTU;
-	dev->tx_queue_len	= 0;
 	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT |
 				  IFF_MULTICAST;
 	dev->watchdog_timeo	= 0;
@@ -775,24 +747,7 @@
 
 	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
 	chan->mode = L2CAP_MODE_LE_FLOWCTL;
-	chan->omtu = 65535;
-	chan->imtu = chan->omtu;
-
-	return chan;
-}
-
-static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
-{
-	struct l2cap_chan *chan;
-
-	chan = chan_create();
-	if (!chan)
-		return NULL;
-
-	chan->remote_mps = chan->omtu;
-	chan->mps = chan->omtu;
-
-	chan->state = BT_CONNECTED;
+	chan->imtu = 1280;
 
 	return chan;
 }
@@ -919,7 +874,10 @@
 {
 	struct l2cap_chan *chan;
 
-	chan = chan_open(pchan);
+	chan = chan_create();
+	if (!chan)
+		return NULL;
+
 	chan->ops = pchan->ops;
 
 	BT_DBG("chan %p pchan %p", chan, pchan);
@@ -1065,34 +1023,23 @@
 		return BDADDR_LE_RANDOM;
 }
 
-static struct l2cap_chan *chan_get(void)
-{
-	struct l2cap_chan *pchan;
-
-	pchan = chan_create();
-	if (!pchan)
-		return NULL;
-
-	pchan->ops = &bt_6lowpan_chan_ops;
-
-	return pchan;
-}
-
 static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
 {
-	struct l2cap_chan *pchan;
+	struct l2cap_chan *chan;
 	int err;
 
-	pchan = chan_get();
-	if (!pchan)
+	chan = chan_create();
+	if (!chan)
 		return -EINVAL;
 
-	err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
+	chan->ops = &bt_6lowpan_chan_ops;
+
+	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
 				 addr, dst_type);
 
-	BT_DBG("chan %p err %d", pchan, err);
+	BT_DBG("chan %p err %d", chan, err);
 	if (err < 0)
-		l2cap_chan_put(pchan);
+		l2cap_chan_put(chan);
 
 	return err;
 }
@@ -1117,31 +1064,32 @@
 static struct l2cap_chan *bt_6lowpan_listen(void)
 {
 	bdaddr_t *addr = BDADDR_ANY;
-	struct l2cap_chan *pchan;
+	struct l2cap_chan *chan;
 	int err;
 
 	if (!enable_6lowpan)
 		return NULL;
 
-	pchan = chan_get();
-	if (!pchan)
+	chan = chan_create();
+	if (!chan)
 		return NULL;
 
-	pchan->state = BT_LISTEN;
-	pchan->src_type = BDADDR_LE_PUBLIC;
+	chan->ops = &bt_6lowpan_chan_ops;
+	chan->state = BT_LISTEN;
+	chan->src_type = BDADDR_LE_PUBLIC;
 
-	atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT);
+	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);
 
-	BT_DBG("chan %p src type %d", pchan, pchan->src_type);
+	BT_DBG("chan %p src type %d", chan, chan->src_type);
 
-	err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
+	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
 	if (err) {
-		l2cap_chan_put(pchan);
+		l2cap_chan_put(chan);
 		BT_ERR("psm cannot be added err %d", err);
 		return NULL;
 	}
 
-	return pchan;
+	return chan;
 }
 
 static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
@@ -1165,7 +1113,7 @@
 		return -ENOENT;
 
 	hci_dev_lock(hdev);
-	hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
 	hci_dev_unlock(hdev);
 
 	if (!hcon)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 70f9d94..a3bffd1 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -33,7 +33,7 @@
 
 #include "selftest.h"
 
-#define VERSION "2.20"
+#define VERSION "2.21"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO	8
@@ -221,7 +221,7 @@
 
 	BT_DBG("sock %p sk %p len %zu", sock, sk, len);
 
-	if (flags & (MSG_OOB))
+	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b4548c73..85b82f7 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -59,15 +59,11 @@
 	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
 };
 
-static void hci_le_create_connection_cancel(struct hci_conn *conn)
-{
-	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
-}
-
 /* This function requires the caller holds hdev->lock */
 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
 {
 	struct hci_conn_params *params;
+	struct hci_dev *hdev = conn->hdev;
 	struct smp_irk *irk;
 	bdaddr_t *bdaddr;
 	u8 bdaddr_type;
@@ -76,14 +72,15 @@
 	bdaddr_type = conn->dst_type;
 
 	/* Check if we need to convert to identity address */
-	irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
 	if (irk) {
 		bdaddr = &irk->bdaddr;
 		bdaddr_type = irk->addr_type;
 	}
 
-	params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
-	if (!params)
+	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
+					   bdaddr_type);
+	if (!params || !params->explicit_connect)
 		return;
 
 	/* The connection attempt was doing scan for new RPA, and is
@@ -91,19 +88,97 @@
 	 * autoconnect action, remove them completely. If they are, just unmark
 	 * them as waiting for connection, by clearing explicit_connect field.
 	 */
-	if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
-		hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
-	else
-		params->explicit_connect = false;
+	params->explicit_connect = false;
+
+	list_del_init(&params->action);
+
+	switch (params->auto_connect) {
+	case HCI_AUTO_CONN_EXPLICIT:
+		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
+		/* return instead of break to avoid duplicate scan update */
+		return;
+	case HCI_AUTO_CONN_DIRECT:
+	case HCI_AUTO_CONN_ALWAYS:
+		list_add(&params->action, &hdev->pend_le_conns);
+		break;
+	case HCI_AUTO_CONN_REPORT:
+		list_add(&params->action, &hdev->pend_le_reports);
+		break;
+	default:
+		break;
+	}
+
+	hci_update_background_scan(hdev);
 }
 
-/* This function requires the caller holds hdev->lock */
+static void hci_conn_cleanup(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
+		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
+
+	hci_chan_list_flush(conn);
+
+	hci_conn_hash_del(hdev, conn);
+
+	if (hdev->notify)
+		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
+
+	hci_conn_del_sysfs(conn);
+
+	debugfs_remove_recursive(conn->debugfs);
+
+	hci_dev_put(hdev);
+
+	hci_conn_put(conn);
+}
+
+static void le_scan_cleanup(struct work_struct *work)
+{
+	struct hci_conn *conn = container_of(work, struct hci_conn,
+					     le_scan_cleanup);
+	struct hci_dev *hdev = conn->hdev;
+	struct hci_conn *c = NULL;
+
+	BT_DBG("%s hcon %p", hdev->name, conn);
+
+	hci_dev_lock(hdev);
+
+	/* Check that the hci_conn is still around */
+	rcu_read_lock();
+	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+		if (c == conn)
+			break;
+	}
+	rcu_read_unlock();
+
+	if (c == conn) {
+		hci_connect_le_scan_cleanup(conn);
+		hci_conn_cleanup(conn);
+	}
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+	hci_conn_put(conn);
+}
+
 static void hci_connect_le_scan_remove(struct hci_conn *conn)
 {
-	hci_connect_le_scan_cleanup(conn);
+	BT_DBG("%s hcon %p", conn->hdev->name, conn);
 
-	hci_conn_hash_del(conn->hdev, conn);
-	hci_update_background_scan(conn->hdev);
+	/* We can't call hci_conn_del/hci_conn_cleanup here since that
+	 * could deadlock with another hci_conn_del() call that's holding
+	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+	 * Instead, grab temporary extra references to the hci_dev and
+	 * hci_conn and perform the necessary cleanup in a separate work
+	 * callback.
+	 */
+
+	hci_dev_hold(conn->hdev);
+	hci_conn_get(conn);
+
+	schedule_work(&conn->le_scan_cleanup);
 }
 
 static void hci_acl_create_connection(struct hci_conn *conn)
@@ -149,33 +224,8 @@
 	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
 }
 
-static void hci_acl_create_connection_cancel(struct hci_conn *conn)
-{
-	struct hci_cp_create_conn_cancel cp;
-
-	BT_DBG("hcon %p", conn);
-
-	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
-		return;
-
-	bacpy(&cp.bdaddr, &conn->dst);
-	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
-}
-
-static void hci_reject_sco(struct hci_conn *conn)
-{
-	struct hci_cp_reject_sync_conn_req cp;
-
-	cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
-	bacpy(&cp.bdaddr, &conn->dst);
-
-	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
-}
-
 int hci_disconnect(struct hci_conn *conn, __u8 reason)
 {
-	struct hci_cp_disconnect cp;
-
 	BT_DBG("hcon %p", conn);
 
 	/* When we are master of an established connection and it enters
@@ -183,7 +233,8 @@
 	 * current clock offset.  Processing of the result is done
 	 * within the event handling and hci_clock_offset_evt function.
 	 */
-	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
+	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
+	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
 		struct hci_dev *hdev = conn->hdev;
 		struct hci_cp_read_clock_offset clkoff_cp;
 
@@ -192,25 +243,7 @@
 			     &clkoff_cp);
 	}
 
-	conn->state = BT_DISCONN;
-
-	cp.handle = cpu_to_le16(conn->handle);
-	cp.reason = reason;
-	return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
-}
-
-static void hci_amp_disconn(struct hci_conn *conn)
-{
-	struct hci_cp_disconn_phy_link cp;
-
-	BT_DBG("hcon %p", conn);
-
-	conn->state = BT_DISCONN;
-
-	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
-	cp.reason = hci_proto_disconn_ind(conn);
-	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
-		     sizeof(cp), &cp);
+	return hci_abort_conn(conn, reason);
 }
 
 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -376,35 +409,14 @@
 	if (refcnt > 0)
 		return;
 
-	switch (conn->state) {
-	case BT_CONNECT:
-	case BT_CONNECT2:
-		if (conn->out) {
-			if (conn->type == ACL_LINK)
-				hci_acl_create_connection_cancel(conn);
-			else if (conn->type == LE_LINK) {
-				if (test_bit(HCI_CONN_SCANNING, &conn->flags))
-					hci_connect_le_scan_remove(conn);
-				else
-					hci_le_create_connection_cancel(conn);
-			}
-		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
-			hci_reject_sco(conn);
-		}
-		break;
-	case BT_CONFIG:
-	case BT_CONNECTED:
-		if (conn->type == AMP_LINK) {
-			hci_amp_disconn(conn);
-		} else {
-			__u8 reason = hci_proto_disconn_ind(conn);
-			hci_disconnect(conn, reason);
-		}
-		break;
-	default:
-		conn->state = BT_CLOSED;
-		break;
+	/* LE connections in scanning state need special handling */
+	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
+	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+		hci_connect_le_scan_remove(conn);
+		return;
 	}
+
+	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
 }
 
 /* Enter sniff mode */
@@ -472,7 +484,7 @@
 		return;
 	}
 
-	hci_le_create_connection_cancel(conn);
+	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 }
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -535,6 +547,7 @@
 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -581,27 +594,17 @@
 		}
 	}
 
-	hci_chan_list_flush(conn);
-
 	if (conn->amp_mgr)
 		amp_mgr_put(conn->amp_mgr);
 
-	hci_conn_hash_del(hdev, conn);
-	if (hdev->notify)
-		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
-
 	skb_queue_purge(&conn->data_q);
 
-	hci_conn_del_sysfs(conn);
-
-	debugfs_remove_recursive(conn->debugfs);
-
-	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
-		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
-
-	hci_dev_put(hdev);
-
-	hci_conn_put(conn);
+	/* Remove the connection from the list and cleanup its remaining
+	 * state. This is a separate function since for some cases like
+	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
+	 * rest of hci_conn_del.
+	 */
+	hci_conn_cleanup(conn);
 
 	return 0;
 }
@@ -800,7 +803,7 @@
 	 * attempt, we simply update pending_sec_level and auth_type fields
 	 * and return the object found.
 	 */
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
 	conn_unfinished = NULL;
 	if (conn) {
 		if (conn->state == BT_CONNECT &&
@@ -950,13 +953,10 @@
 {
 	struct hci_conn *conn;
 
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+	conn = hci_conn_hash_lookup_le(hdev, addr, type);
 	if (!conn)
 		return false;
 
-	if (conn->dst_type != type)
-		return false;
-
 	if (conn->state != BT_CONNECTED)
 		return false;
 
@@ -973,15 +973,23 @@
 	if (is_connected(hdev, addr, addr_type))
 		return -EISCONN;
 
-	params = hci_conn_params_add(hdev, addr, addr_type);
-	if (!params)
-		return -EIO;
+	params = hci_conn_params_lookup(hdev, addr, addr_type);
+	if (!params) {
+		params = hci_conn_params_add(hdev, addr, addr_type);
+		if (!params)
+			return -ENOMEM;
 
-	/* If we created new params, or existing params were marked as disabled,
-	 * mark them to be used just once to connect.
-	 */
-	if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+		/* If we created new params, mark them to be deleted in
+		 * hci_connect_le_scan_cleanup. It's different case than
+		 * existing disabled params, those will stay after cleanup.
+		 */
 		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+	}
+
+	/* We're trying to connect, so make sure params are at pend_le_conns */
+	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
+	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
 		list_del_init(&params->action);
 		list_add(&params->action, &hdev->pend_le_conns);
 	}
@@ -1021,7 +1029,7 @@
 	 * attempt, we simply update pending_sec_level and auth_type fields
 	 * and return the object found.
 	 */
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
 	if (conn) {
 		if (conn->pending_sec_level < sec_level)
 			conn->pending_sec_level = sec_level;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index a7cdd99..83a6aac 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -65,13 +65,6 @@
 #define hci_req_lock(d)		mutex_lock(&d->req_lock)
 #define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
 
-/* ---- HCI notifications ---- */
-
-static void hci_notify(struct hci_dev *hdev, int event)
-{
-	hci_sock_dev_event(hdev, event);
-}
-
 /* ---- HCI debugfs entries ---- */
 
 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
@@ -134,6 +127,77 @@
 	.llseek		= default_llseek,
 };
 
+static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[3];
+
+	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
+	buf[1] = '\n';
+	buf[2] = '\0';
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
+				 size_t count, loff_t *ppos)
+{
+	struct hci_dev *hdev = file->private_data;
+	char buf[32];
+	size_t buf_size = min(count, (sizeof(buf)-1));
+	bool enable;
+	int err;
+
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	buf[buf_size] = '\0';
+	if (strtobool(buf, &enable))
+		return -EINVAL;
+
+	/* When the diagnostic flags are not persistent and the transport
+	 * is not active, then there is no need for the vendor callback.
+	 *
+	 * Instead just store the desired value. If needed the setting
+	 * will be programmed when the controller gets powered on.
+	 */
+	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+	    !test_bit(HCI_RUNNING, &hdev->flags))
+		goto done;
+
+	hci_req_lock(hdev);
+	err = hdev->set_diag(hdev, enable);
+	hci_req_unlock(hdev);
+
+	if (err < 0)
+		return err;
+
+done:
+	if (enable)
+		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
+	else
+		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
+
+	return count;
+}
+
+static const struct file_operations vendor_diag_fops = {
+	.open		= simple_open,
+	.read		= vendor_diag_read,
+	.write		= vendor_diag_write,
+	.llseek		= default_llseek,
+};
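
In practice this presumably surfaces as a vendor_diag file under the
controller's debugfs directory (typically /sys/kernel/debug/bluetooth/hciX):
reading it reports Y or N, and writing any value strtobool() accepts ('1',
'0', 'y', 'n') toggles HCI_VENDOR_DIAG, invoking hdev->set_diag() unless the
transport is down and the setting is non-persistent.
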
+
+static void hci_debugfs_create_basic(struct hci_dev *hdev)
+{
+	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+			    &dut_mode_fops);
+
+	if (hdev->set_diag)
+		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
+				    &vendor_diag_fops);
+}
+
 /* ---- HCI requests ---- */
 
 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
@@ -850,13 +914,8 @@
 	if (err < 0)
 		return err;
 
-	/* The Device Under Test (DUT) mode is special and available for
-	 * all controller types. So just create it early on.
-	 */
-	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
-		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
-				    &dut_mode_fops);
-	}
+	if (hci_dev_test_flag(hdev, HCI_SETUP))
+		hci_debugfs_create_basic(hdev);
 
 	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
 	if (err < 0)
@@ -933,6 +992,9 @@
 	if (err < 0)
 		return err;
 
+	if (hci_dev_test_flag(hdev, HCI_SETUP))
+		hci_debugfs_create_basic(hdev);
+
 	return 0;
 }
 
@@ -1385,10 +1447,15 @@
 		goto done;
 	}
 
+	set_bit(HCI_RUNNING, &hdev->flags);
+	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
+
 	atomic_set(&hdev->cmd_cnt, 1);
 	set_bit(HCI_INIT, &hdev->flags);
 
 	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
+		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
+
 		if (hdev->setup)
 			ret = hdev->setup(hdev);
 
@@ -1429,17 +1496,28 @@
 
 	if (!ret) {
 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
-		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
 			ret = __hci_init(hdev);
+			if (!ret && hdev->post_init)
+				ret = hdev->post_init(hdev);
+		}
 	}
 
+	/* If the HCI Reset command is clearing all diagnostic settings,
+	 * then they need to be reprogrammed after the init procedure
+	 * completed.
+	 */
+	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
+		ret = hdev->set_diag(hdev, true);
+
 	clear_bit(HCI_INIT, &hdev->flags);
 
 	if (!ret) {
 		hci_dev_hold(hdev);
 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 		set_bit(HCI_UP, &hdev->flags);
-		hci_notify(hdev, HCI_DEV_UP);
+		hci_sock_dev_event(hdev, HCI_DEV_UP);
 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
@@ -1466,6 +1544,9 @@
 			hdev->sent_cmd = NULL;
 		}
 
+		clear_bit(HCI_RUNNING, &hdev->flags);
+		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
 		hdev->close(hdev);
 		hdev->flags &= BIT(HCI_RAW);
 	}
@@ -1551,6 +1632,8 @@
 
 int hci_dev_do_close(struct hci_dev *hdev)
 {
+	bool auto_off;
+
 	BT_DBG("%s %p", hdev->name, hdev);
 
 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
@@ -1606,10 +1689,10 @@
 
 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 
-	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
-		if (hdev->dev_type == HCI_BREDR)
-			mgmt_powered(hdev, 0);
-	}
+	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+
+	if (!auto_off && hdev->dev_type == HCI_BREDR)
+		mgmt_powered(hdev, 0);
 
 	hci_inquiry_cache_flush(hdev);
 	hci_pend_le_actions_clear(hdev);
@@ -1618,7 +1701,7 @@
 
 	smp_unregister(hdev);
 
-	hci_notify(hdev, HCI_DEV_DOWN);
+	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
 
 	if (hdev->flush)
 		hdev->flush(hdev);
@@ -1626,9 +1709,8 @@
 	/* Reset device */
 	skb_queue_purge(&hdev->cmd_q);
 	atomic_set(&hdev->cmd_cnt, 1);
-	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
-	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
-	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
+	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
 		clear_bit(HCI_INIT, &hdev->flags);
@@ -1649,6 +1731,9 @@
 		hdev->sent_cmd = NULL;
 	}
 
+	clear_bit(HCI_RUNNING, &hdev->flags);
+	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
 	/* After this point our queues are empty
 	 * and no tasks are scheduled. */
 	hdev->close(hdev);
@@ -2849,30 +2934,6 @@
 }
 
 /* This function requires the caller holds hdev->lock */
-struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
-						    bdaddr_t *addr,
-						    u8 addr_type)
-{
-	struct hci_conn_params *param;
-
-	list_for_each_entry(param, &hdev->pend_le_conns, action) {
-		if (bacmp(&param->addr, addr) == 0 &&
-		    param->addr_type == addr_type &&
-		    param->explicit_connect)
-			return param;
-	}
-
-	list_for_each_entry(param, &hdev->pend_le_reports, action) {
-		if (bacmp(&param->addr, addr) == 0 &&
-		    param->addr_type == addr_type &&
-		    param->explicit_connect)
-			return param;
-	}
-
-	return NULL;
-}
-
-/* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 					    bdaddr_t *addr, u8 addr_type)
 {
@@ -3346,7 +3407,7 @@
 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
 
-	hci_notify(hdev, HCI_DEV_REG);
+	hci_sock_dev_event(hdev, HCI_DEV_REG);
 	hci_dev_hold(hdev);
 
 	queue_work(hdev->req_workqueue, &hdev->power_on);
@@ -3394,7 +3455,7 @@
 	 * pending list */
 	BUG_ON(!list_empty(&hdev->mgmt_pending));
 
-	hci_notify(hdev, HCI_DEV_UNREG);
+	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
 
 	if (hdev->rfkill) {
 		rfkill_unregister(hdev->rfkill);
@@ -3431,7 +3492,7 @@
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)
 {
-	hci_notify(hdev, HCI_DEV_SUSPEND);
+	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
 	return 0;
 }
 EXPORT_SYMBOL(hci_suspend_dev);
@@ -3439,7 +3500,7 @@
 /* Resume HCI device */
 int hci_resume_dev(struct hci_dev *hdev)
 {
-	hci_notify(hdev, HCI_DEV_RESUME);
+	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
 	return 0;
 }
 EXPORT_SYMBOL(hci_resume_dev);
@@ -3471,6 +3532,13 @@
 		return -ENXIO;
 	}
 
+	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
 	/* Incoming skb */
 	bt_cb(skb)->incoming = 1;
 
@@ -3484,6 +3552,22 @@
 }
 EXPORT_SYMBOL(hci_recv_frame);
 
+/* Receive diagnostic message from HCI drivers */
+int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	/* Mark as diagnostic packet */
+	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
+
+	/* Time stamp */
+	__net_timestamp(skb);
+
+	skb_queue_tail(&hdev->rx_q, skb);
+	queue_work(hdev->workqueue, &hdev->rx_work);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_recv_diag);
+
 /* ---- Interface to upper protocols ---- */
 
 int hci_register_cb(struct hci_cb *cb)
@@ -3530,6 +3614,11 @@
 	/* Get rid of skb owner, prior to sending to the driver. */
 	skb_orphan(skb);
 
+	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
+		kfree_skb(skb);
+		return;
+	}
+
 	err = hdev->send(hdev, skb);
 	if (err < 0) {
 		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
@@ -3554,7 +3643,7 @@
 	/* Stand-alone HCI commands must be flagged as
 	 * single-command requests.
 	 */
-	bt_cb(skb)->req.start = true;
+	bt_cb(skb)->hci.req_start = true;
 
 	skb_queue_tail(&hdev->cmd_q, skb);
 	queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -3580,6 +3669,25 @@
 	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
 }
 
+/* Send HCI command and wait for command complete event */
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+			     const void *param, u32 timeout)
+{
+	struct sk_buff *skb;
+
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return ERR_PTR(-ENETDOWN);
+
+	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+	hci_req_lock(hdev);
+	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
+	hci_req_unlock(hdev);
+
+	return skb;
+}
+EXPORT_SYMBOL(hci_cmd_sync);
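
A hypothetical driver-side use of the new helper (the vendor opcode below is
made up for illustration; assumes the usual hci_core.h includes):

	static int example_read_fw_version(struct hci_dev *hdev)
	{
		struct sk_buff *skb;

		skb = hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* skb->data holds the Command Complete return parameters */
		kfree_skb(skb);
		return 0;
	}
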
+
 /* Send ACL data */
 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
 {
@@ -4232,7 +4340,7 @@
 	if (!skb)
 		return true;
 
-	return bt_cb(skb)->req.start;
+	return bt_cb(skb)->hci.req_start;
 }
 
 static void hci_resend_last(struct hci_dev *hdev)
@@ -4292,26 +4400,26 @@
 	 * callback would be found in hdev->sent_cmd instead of the
 	 * command queue (hdev->cmd_q).
 	 */
-	if (bt_cb(hdev->sent_cmd)->req.complete) {
-		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
+	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
+		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
 		return;
 	}
 
-	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
-		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
+	if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
+		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
 		return;
 	}
 
 	/* Remove all pending commands belonging to this request */
 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
 	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
-		if (bt_cb(skb)->req.start) {
+		if (bt_cb(skb)->hci.req_start) {
 			__skb_queue_head(&hdev->cmd_q, skb);
 			break;
 		}
 
-		*req_complete = bt_cb(skb)->req.complete;
-		*req_complete_skb = bt_cb(skb)->req.complete_skb;
+		*req_complete = bt_cb(skb)->hci.req_complete;
+		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
 		kfree_skb(skb);
 	}
 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8acec93..d57c11c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -55,7 +55,12 @@
 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
 	hci_dev_lock(hdev);
-	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+	/* Set discovery state to stopped if we're not doing LE active
+	 * scanning.
+	 */
+	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+	    hdev->le_scan_type != LE_SCAN_ACTIVE)
+		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
 	hci_dev_unlock(hdev);
 
 	hci_conn_check_pending(hdev);
@@ -1910,7 +1915,8 @@
 
 	hci_dev_lock(hdev);
 
-	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
+				       cp->peer_addr_type);
 	if (!conn)
 		goto unlock;
 
@@ -3132,7 +3138,7 @@
 	 * complete event).
 	 */
 	if (ev->status ||
-	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
+	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
 		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
 				     req_complete_skb);
 
@@ -4648,8 +4654,8 @@
 	/* If we're not connectable only connect devices that we have in
 	 * our pend_le_conns list.
 	 */
-	params = hci_explicit_connect_lookup(hdev, addr, addr_type);
-
+	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
+					   addr_type);
 	if (!params)
 		return NULL;
 
@@ -5203,7 +5209,7 @@
 	u8 status = 0, event = hdr->evt, req_evt = 0;
 	u16 opcode = HCI_OP_NOP;
 
-	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
+	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
 		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
 		opcode = __le16_to_cpu(cmd_hdr->opcode);
 		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b736922..981f8a2 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -56,8 +56,8 @@
 		return -ENODATA;
 
 	skb = skb_peek_tail(&req->cmd_q);
-	bt_cb(skb)->req.complete = complete;
-	bt_cb(skb)->req.complete_skb = complete_skb;
+	bt_cb(skb)->hci.req_complete = complete;
+	bt_cb(skb)->hci.req_complete_skb = complete_skb;
 
 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
@@ -99,7 +99,7 @@
 	BT_DBG("skb len %d", skb->len);
 
 	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-	bt_cb(skb)->opcode = opcode;
+	bt_cb(skb)->hci.opcode = opcode;
 
 	return skb;
 }
@@ -128,9 +128,9 @@
 	}
 
 	if (skb_queue_empty(&req->cmd_q))
-		bt_cb(skb)->req.start = true;
+		bt_cb(skb)->hci.req_start = true;
 
-	bt_cb(skb)->req.event = event;
+	bt_cb(skb)->hci.req_event = event;
 
 	skb_queue_tail(&req->cmd_q, skb);
 }
@@ -564,3 +564,96 @@
 	if (err && err != -ENODATA)
 		BT_ERR("Failed to run HCI request: err %d", err);
 }
+
+void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
+		      u8 reason)
+{
+	switch (conn->state) {
+	case BT_CONNECTED:
+	case BT_CONFIG:
+		if (conn->type == AMP_LINK) {
+			struct hci_cp_disconn_phy_link cp;
+
+			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
+			cp.reason = reason;
+			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
+				    &cp);
+		} else {
+			struct hci_cp_disconnect dc;
+
+			dc.handle = cpu_to_le16(conn->handle);
+			dc.reason = reason;
+			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+		}
+
+		conn->state = BT_DISCONN;
+
+		break;
+	case BT_CONNECT:
+		if (conn->type == LE_LINK) {
+			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+				break;
+			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
+				    0, NULL);
+		} else if (conn->type == ACL_LINK) {
+			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
+				break;
+			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
+				    6, &conn->dst);
+		}
+		break;
+	case BT_CONNECT2:
+		if (conn->type == ACL_LINK) {
+			struct hci_cp_reject_conn_req rej;
+
+			bacpy(&rej.bdaddr, &conn->dst);
+			rej.reason = reason;
+
+			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
+				    sizeof(rej), &rej);
+		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
+			struct hci_cp_reject_sync_conn_req rej;
+
+			bacpy(&rej.bdaddr, &conn->dst);
+
+			/* SCO rejection has its own limited set of
+			 * allowed error values (0x0D-0x0F) which isn't
+			 * compatible with most values passed to this
+			 * function. To be safe, hard-code one of the
+			 * values that's suitable for SCO.
+			 */
+			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
+
+			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
+				    sizeof(rej), &rej);
+		}
+		break;
+	default:
+		conn->state = BT_CLOSED;
+		break;
+	}
+}
+
+static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+	if (status)
+		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
+}
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
+{
+	struct hci_request req;
+	int err;
+
+	hci_req_init(&req, conn->hdev);
+
+	__hci_abort_conn(&req, conn, reason);
+
+	err = hci_req_run(&req, abort_conn_complete);
+	if (err && err != -ENODATA) {
+		BT_ERR("Failed to run HCI request: err %d", err);
+		return err;
+	}
+
+	return 0;
+}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index bf6df92..25c7f13 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -55,3 +55,7 @@
 
 void hci_update_background_scan(struct hci_dev *hdev);
 void __hci_update_background_scan(struct hci_request *req);
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
+void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
+		      u8 reason);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1505563..b1eb8c0 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -120,10 +120,7 @@
 	/* Apply filter */
 	flt = &hci_pi(sk)->filter;
 
-	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
-		flt_type = 0;
-	else
-		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
 
 	if (!test_bit(flt_type, &flt->type_mask))
 		return true;
@@ -173,6 +170,11 @@
 			continue;
 
 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
+				continue;
 			if (is_filtered_packet(sk, skb))
 				continue;
 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@@ -279,6 +281,9 @@
 		else
 			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
 		break;
+	case HCI_DIAG_PKT:
+		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
+		break;
 	default:
 		return;
 	}
@@ -303,6 +308,7 @@
 {
 	struct hci_mon_hdr *hdr;
 	struct hci_mon_new_index *ni;
+	struct hci_mon_index_info *ii;
 	struct sk_buff *skb;
 	__le16 opcode;
 
@@ -312,7 +318,7 @@
 		if (!skb)
 			return NULL;
 
-		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
+		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
 		ni->type = hdev->dev_type;
 		ni->bus = hdev->bus;
 		bacpy(&ni->bdaddr, &hdev->bdaddr);
@@ -329,6 +335,40 @@
 		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
 		break;
 
+	case HCI_DEV_SETUP:
+		if (hdev->manufacturer == 0xffff)
+			return NULL;
+
+		/* fall through */
+
+	case HCI_DEV_UP:
+		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
+		bacpy(&ii->bdaddr, &hdev->bdaddr);
+		ii->manufacturer = cpu_to_le16(hdev->manufacturer);
+
+		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
+		break;
+
+	case HCI_DEV_OPEN:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
+		break;
+
+	case HCI_DEV_CLOSE:
+		skb = bt_skb_alloc(0, GFP_ATOMIC);
+		if (!skb)
+			return NULL;
+
+		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
+		break;
+
 	default:
 		return NULL;
 	}
@@ -358,6 +398,28 @@
 
 		if (sock_queue_rcv_skb(sk, skb))
 			kfree_skb(skb);
+
+		if (!test_bit(HCI_RUNNING, &hdev->flags))
+			continue;
+
+		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
+		if (!skb)
+			continue;
+
+		if (sock_queue_rcv_skb(sk, skb))
+			kfree_skb(skb);
+
+		if (test_bit(HCI_UP, &hdev->flags))
+			skb = create_monitor_event(hdev, HCI_DEV_UP);
+		else if (hci_dev_test_flag(hdev, HCI_SETUP))
+			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
+		else
+			skb = NULL;
+
+		if (skb) {
+			if (sock_queue_rcv_skb(sk, skb))
+				kfree_skb(skb);
+		}
 	}
 
 	read_unlock(&hci_dev_list_lock);
@@ -392,14 +454,12 @@
 
 void hci_sock_dev_event(struct hci_dev *hdev, int event)
 {
-	struct hci_ev_si_device ev;
-
 	BT_DBG("hdev %s event %d", hdev->name, event);
 
-	/* Send event to monitor */
 	if (atomic_read(&monitor_promisc)) {
 		struct sk_buff *skb;
 
+		/* Send event to monitor */
 		skb = create_monitor_event(hdev, event);
 		if (skb) {
 			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
@@ -408,10 +468,14 @@
 		}
 	}
 
-	/* Send event to sockets */
-	ev.event  = event;
-	ev.dev_id = hdev->id;
-	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	if (event <= HCI_DEV_DOWN) {
+		struct hci_ev_si_device ev;
+
+		/* Send event to sockets */
+		ev.event  = event;
+		ev.dev_id = hdev->id;
+		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
+	}
 
 	if (event == HCI_DEV_UNREG) {
 		struct sock *sk;
@@ -937,7 +1001,7 @@
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
-	if (flags & (MSG_OOB))
+	if (flags & MSG_OOB)
 		return -EOPNOTSUPP;
 
 	if (sk->sk_state == BT_CLOSED)
@@ -1185,7 +1249,7 @@
 			/* Stand-alone HCI commands must be flagged as
 			 * single-command requests.
 			 */
-			bt_cb(skb)->req.start = true;
+			bt_cb(skb)->hci.req_start = true;
 
 			skb_queue_tail(&hdev->cmd_q, skb);
 			queue_work(hdev->workqueue, &hdev->cmd_work);
@@ -1196,6 +1260,12 @@
 			goto drop;
 		}
 
+		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+			err = -EINVAL;
+			goto drop;
+		}
+
 		skb_queue_tail(&hdev->raw_q, skb);
 		queue_work(hdev->workqueue, &hdev->tx_work);
 	}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index f1a117f..0bec458 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -401,6 +401,20 @@
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
+	/* The HIDP user-space API only contains calls to add and remove
+	 * devices. There is no way to forward events of any kind. Therefore,
+	 * we have to forcefully disconnect a device on idle-timeouts. This is
+	 * unfortunate and weird API design, but it is spec-compliant and
+	 * required for backwards-compatibility. Hence, on idle-timeout, we
+	 * signal driver-detach events, so poll() will be woken up with an
+	 * error-condition on both sockets.
+	 */
+
+	session->intr_sock->sk->sk_err = EUNATCH;
+	session->ctrl_sock->sk->sk_err = EUNATCH;
+	wake_up_interruptible(sk_sleep(session->intr_sock->sk));
+	wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
+
 	hidp_session_terminate(session);
 }
 
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 586b3d5..1bb5515 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1111,53 +1111,76 @@
 	if (!sk)
 		return 0;
 
+	lock_sock(sk);
+
+	if (sk->sk_shutdown)
+		goto shutdown_already;
+
+	BT_DBG("Handling sock shutdown");
+
 	/* prevent sk structure from being freed whilst unlocked */
 	sock_hold(sk);
 
 	chan = l2cap_pi(sk)->chan;
 	/* prevent chan structure from being freed whilst unlocked */
 	l2cap_chan_hold(chan);
-	conn = chan->conn;
 
 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 
+	if (chan->mode == L2CAP_MODE_ERTM &&
+	    chan->unacked_frames > 0 &&
+	    chan->state == BT_CONNECTED) {
+		err = __l2cap_wait_ack(sk, chan);
+
+		/* After waiting for ACKs, check whether shutdown
+		 * has already been actioned to close the L2CAP
+		 * link such as by l2cap_disconnection_req().
+		 */
+		if (sk->sk_shutdown)
+			goto has_shutdown;
+	}
+
+	sk->sk_shutdown = SHUTDOWN_MASK;
+	release_sock(sk);
+
+	l2cap_chan_lock(chan);
+	conn = chan->conn;
 	if (conn)
+		/* prevent conn structure from being freed */
+		l2cap_conn_get(conn);
+	l2cap_chan_unlock(chan);
+
+	if (conn)
+		/* mutex lock must be taken before l2cap_chan_lock() */
 		mutex_lock(&conn->chan_lock);
 
 	l2cap_chan_lock(chan);
-	lock_sock(sk);
+	l2cap_chan_close(chan, 0);
+	l2cap_chan_unlock(chan);
 
-	if (!sk->sk_shutdown) {
-		if (chan->mode == L2CAP_MODE_ERTM &&
-		    chan->unacked_frames > 0 &&
-		    chan->state == BT_CONNECTED)
-			err = __l2cap_wait_ack(sk, chan);
-
-		sk->sk_shutdown = SHUTDOWN_MASK;
-
-		release_sock(sk);
-		l2cap_chan_close(chan, 0);
-		lock_sock(sk);
-
-		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
-		    !(current->flags & PF_EXITING))
-			err = bt_sock_wait_state(sk, BT_CLOSED,
-						 sk->sk_lingertime);
+	if (conn) {
+		mutex_unlock(&conn->chan_lock);
+		l2cap_conn_put(conn);
 	}
 
+	lock_sock(sk);
+
+	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+	    !(current->flags & PF_EXITING))
+		err = bt_sock_wait_state(sk, BT_CLOSED,
+					 sk->sk_lingertime);
+
+has_shutdown:
+	l2cap_chan_put(chan);
+	sock_put(sk);
+
+shutdown_already:
 	if (!err && sk->sk_err)
 		err = -sk->sk_err;
 
 	release_sock(sk);
-	l2cap_chan_unlock(chan);
 
-	if (conn)
-		mutex_unlock(&conn->chan_lock);
-
-	l2cap_chan_put(chan);
-	sock_put(sk);
-
-	BT_DBG("err: %d", err);
+	BT_DBG("Sock shutdown complete err: %d", err);
 
 	return err;
 }
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 8b4cdce..aa4cf64 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -151,6 +151,22 @@
 }
 EXPORT_SYMBOL(bt_info);
 
+void bt_warn(const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	pr_warn("%pV", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(bt_warn);
+
 void bt_err(const char *format, ...)
 {
 	struct va_format vaf;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index ccaf5a4..7f22119 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -268,6 +268,14 @@
 			       HCI_SOCK_TRUSTED, skip_sk);
 }
 
+static u8 le_addr_type(u8 mgmt_addr_type)
+{
+	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
+		return ADDR_LE_DEV_PUBLIC;
+	else
+		return ADDR_LE_DEV_RANDOM;
+}
+
 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
 			u16 data_len)
 {
@@ -1631,35 +1639,8 @@
 	discov_stopped = hci_stop_discovery(&req);
 
 	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
-		struct hci_cp_disconnect dc;
-		struct hci_cp_reject_conn_req rej;
-
-		switch (conn->state) {
-		case BT_CONNECTED:
-		case BT_CONFIG:
-			dc.handle = cpu_to_le16(conn->handle);
-			dc.reason = 0x15; /* Terminated due to Power Off */
-			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
-			break;
-		case BT_CONNECT:
-			if (conn->type == LE_LINK)
-				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
-					    0, NULL);
-			else if (conn->type == ACL_LINK)
-				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
-					    6, &conn->dst);
-			break;
-		case BT_CONNECT2:
-			bacpy(&rej.bdaddr, &conn->dst);
-			rej.reason = 0x15; /* Terminated due to Power Off */
-			if (conn->type == ACL_LINK)
-				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
-					    sizeof(rej), &rej);
-			else if (conn->type == SCO_LINK)
-				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
-					    sizeof(rej), &rej);
-			break;
-		}
+		/* 0x15 == Terminated due to Power Off */
+		__hci_abort_conn(&req, conn, 0x15);
 	}
 
 	err = hci_req_run(&req, clean_up_hci_complete);
@@ -3044,9 +3025,10 @@
 {
 	struct mgmt_cp_unpair_device *cp = data;
 	struct mgmt_rp_unpair_device rp;
-	struct hci_cp_disconnect dc;
+	struct hci_conn_params *params;
 	struct mgmt_pending_cmd *cmd;
 	struct hci_conn *conn;
+	u8 addr_type;
 	int err;
 
 	memset(&rp, 0, sizeof(rp));
@@ -3087,36 +3069,23 @@
 			conn = NULL;
 
 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
-	} else {
-		u8 addr_type;
-
-		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
-					       &cp->addr.bdaddr);
-		if (conn) {
-			/* Defer clearing up the connection parameters
-			 * until closing to give a chance of keeping
-			 * them if a repairing happens.
-			 */
-			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
-
-			/* If disconnection is not requested, then
-			 * clear the connection variable so that the
-			 * link is not terminated.
-			 */
-			if (!cp->disconnect)
-				conn = NULL;
+		if (err < 0) {
+			err = mgmt_cmd_complete(sk, hdev->id,
+						MGMT_OP_UNPAIR_DEVICE,
+						MGMT_STATUS_NOT_PAIRED, &rp,
+						sizeof(rp));
+			goto unlock;
 		}
 
-		if (cp->addr.type == BDADDR_LE_PUBLIC)
-			addr_type = ADDR_LE_DEV_PUBLIC;
-		else
-			addr_type = ADDR_LE_DEV_RANDOM;
-
-		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
-		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+		goto done;
 	}
 
+	/* LE address type */
+	addr_type = le_addr_type(cp->addr.type);
+
+	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+
+	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
 	if (err < 0) {
 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
 					MGMT_STATUS_NOT_PAIRED, &rp,
@@ -3124,6 +3093,36 @@
 		goto unlock;
 	}
 
+	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
+	if (!conn) {
+		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
+		goto done;
+	}
+
+	/* Abort any ongoing SMP pairing */
+	smp_cancel_pairing(conn);
+
+	/* Defer clearing up the connection parameters until closing to
+	 * give a chance of keeping them if a re-pairing happens.
+	 */
+	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
+
+	/* Disable auto-connection parameters if present */
+	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
+	if (params) {
+		if (params->explicit_connect)
+			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+		else
+			params->auto_connect = HCI_AUTO_CONN_DISABLED;
+	}
+
+	/* If disconnection is not requested, then clear the connection
+	 * variable so that the link is not terminated.
+	 */
+	if (!cp->disconnect)
+		conn = NULL;
+
+done:
 	/* If the connection variable is set, then termination of the
 	 * link is requested.
 	 */
@@ -3143,9 +3142,7 @@
 
 	cmd->cmd_complete = addr_cmd_complete;
 
-	dc.handle = cpu_to_le16(conn->handle);
-	dc.reason = 0x13; /* Remote User Terminated Connection */
-	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 	if (err < 0)
 		mgmt_pending_remove(cmd);
 
@@ -3193,7 +3190,8 @@
 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
 					       &cp->addr.bdaddr);
 	else
-		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
+		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+					       le_addr_type(cp->addr.type));
 
 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
@@ -3544,14 +3542,8 @@
 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
 				       auth_type);
 	} else {
-		u8 addr_type;
-
-		/* Convert from L2CAP channel address type to HCI address type
-		 */
-		if (cp->addr.type == BDADDR_LE_PUBLIC)
-			addr_type = ADDR_LE_DEV_PUBLIC;
-		else
-			addr_type = ADDR_LE_DEV_RANDOM;
+		u8 addr_type = le_addr_type(cp->addr.type);
+		struct hci_conn_params *p;
 
 		/* When pairing a new device, it is expected to remember
 		 * this device for future connections. Adding the connection
@@ -3562,7 +3554,10 @@
 		 * If connection parameters already exist, then they
 		 * will be kept and this function does nothing.
 		 */
-		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
+
+		if (p && p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+			p->auto_connect = HCI_AUTO_CONN_DISABLED;
 
 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
 					   addr_type, sec_level,
@@ -3693,7 +3688,8 @@
 	if (addr->type == BDADDR_BREDR)
 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
 	else
-		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
+		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
+					       le_addr_type(addr->type));
 
 	if (!conn) {
 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
@@ -5596,14 +5592,9 @@
 
 	for (i = 0; i < irk_count; i++) {
 		struct mgmt_irk_info *irk = &cp->irks[i];
-		u8 addr_type;
 
-		if (irk->addr.type == BDADDR_LE_PUBLIC)
-			addr_type = ADDR_LE_DEV_PUBLIC;
-		else
-			addr_type = ADDR_LE_DEV_RANDOM;
-
-		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
+		hci_add_irk(hdev, &irk->addr.bdaddr,
+			    le_addr_type(irk->addr.type), irk->val,
 			    BDADDR_ANY);
 	}
 
@@ -5683,12 +5674,7 @@
 
 	for (i = 0; i < key_count; i++) {
 		struct mgmt_ltk_info *key = &cp->keys[i];
-		u8 type, addr_type, authenticated;
-
-		if (key->addr.type == BDADDR_LE_PUBLIC)
-			addr_type = ADDR_LE_DEV_PUBLIC;
-		else
-			addr_type = ADDR_LE_DEV_RANDOM;
+		u8 type, authenticated;
 
 		switch (key->type) {
 		case MGMT_LTK_UNAUTHENTICATED:
@@ -5714,9 +5700,9 @@
 			continue;
 		}
 
-		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
-			    authenticated, key->val, key->enc_size, key->ediv,
-			    key->rand);
+		hci_add_ltk(hdev, &key->addr.bdaddr,
+			    le_addr_type(key->addr.type), type, authenticated,
+			    key->val, key->enc_size, key->ediv, key->rand);
 	}
 
 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
@@ -6117,14 +6103,21 @@
 		__hci_update_background_scan(req);
 		break;
 	case HCI_AUTO_CONN_REPORT:
-		list_add(&params->action, &hdev->pend_le_reports);
+		if (params->explicit_connect)
+			list_add(&params->action, &hdev->pend_le_conns);
+		else
+			list_add(&params->action, &hdev->pend_le_reports);
 		__hci_update_background_scan(req);
 		break;
 	case HCI_AUTO_CONN_DIRECT:
 	case HCI_AUTO_CONN_ALWAYS:
 		if (!is_connected(hdev, addr, addr_type)) {
 			list_add(&params->action, &hdev->pend_le_conns);
-			__hci_update_background_scan(req);
+			/* If we are in the scan phase of connecting, we have
+			 * already been added to pend_le_conns and are scanning.
+			 */
+			if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
+				__hci_update_background_scan(req);
 		}
 		break;
 	}
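
For HCI_AUTO_CONN_REPORT, an entry with a pending explicit connect now goes on the connect list rather than the report list, and in the DIRECT/ALWAYS cases the background-scan refresh is skipped when an explicit connect already has the entry on pend_le_conns. The list-placement rule, as a tiny standalone sketch:

/* Model of the pend-list selection; strings stand in for the
 * kernel's list_add() targets. */
#include <stdbool.h>
#include <stdio.h>

static const char *report_list(bool explicit_connect)
{
	/* a pending explicit connect must stay on the connect list,
	 * otherwise the device would only be reported, not connected */
	return explicit_connect ? "pend_le_conns" : "pend_le_reports";
}

int main(void)
{
	printf("explicit: %s\n", report_list(true));
	printf("plain:    %s\n", report_list(false));
	return 0;
}
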
@@ -6221,10 +6214,7 @@
 		goto added;
 	}
 
-	if (cp->addr.type == BDADDR_LE_PUBLIC)
-		addr_type = ADDR_LE_DEV_PUBLIC;
-	else
-		addr_type = ADDR_LE_DEV_RANDOM;
+	addr_type = le_addr_type(cp->addr.type);
 
 	if (cp->action == 0x02)
 		auto_conn = HCI_AUTO_CONN_ALWAYS;
@@ -6353,10 +6343,7 @@
 			goto complete;
 		}
 
-		if (cp->addr.type == BDADDR_LE_PUBLIC)
-			addr_type = ADDR_LE_DEV_PUBLIC;
-		else
-			addr_type = ADDR_LE_DEV_RANDOM;
+		addr_type = le_addr_type(cp->addr.type);
 
 		/* Kernel internally uses conn_params with resolvable private
 		 * address, but Remove Device allows only identity addresses.
@@ -6379,7 +6366,8 @@
 			goto unlock;
 		}
 
-		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
 			err = cmd->cmd_complete(cmd,
 						MGMT_STATUS_INVALID_PARAMS);
 			mgmt_pending_remove(cmd);
@@ -6415,6 +6403,10 @@
 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
 				continue;
 			device_removed(sk, hdev, &p->addr, p->addr_type);
+			if (p->explicit_connect) {
+				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+				continue;
+			}
 			list_del(&p->action);
 			list_del(&p->list);
 			kfree(p);
@@ -7857,27 +7849,13 @@
 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
 {
 	struct mgmt_ev_new_irk ev;
 
 	memset(&ev, 0, sizeof(ev));
 
-	/* For identity resolving keys from devices that are already
-	 * using a public address or static random address, do not
-	 * ask for storing this key. The identity resolving key really
-	 * is only mandatory for devices using resolvable random
-	 * addresses.
-	 *
-	 * Storing all identity resolving keys has the downside that
-	 * they will be also loaded on next boot of they system. More
-	 * identity resolving keys, means more time during scanning is
-	 * needed to actually resolve these addresses.
-	 */
-	if (bacmp(&irk->rpa, BDADDR_ANY))
-		ev.store_hint = 0x01;
-	else
-		ev.store_hint = 0x00;
+	ev.store_hint = persistent;
 
 	bacpy(&ev.rpa, &irk->rpa);
 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
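
mgmt_new_irk() now takes the persistence decision from its caller instead of inferring it from the presence of an RPA; the smp.c hunk below computes it once for LTKs, IRKs and CSRKs alike. For LE, the rule is that keys persist only when both sides requested bonding, a simple flag intersection (the flag value below is an assumption for illustration):

/* Standalone model of the LE key-persistence rule. */
#include <assert.h>
#include <stdbool.h>

#define SMP_AUTH_BONDING 0x01	/* illustrative value */

static bool keys_persistent(unsigned char req_auth, unsigned char rsp_auth)
{
	/* both sides must have set the bonding bit */
	return !!((req_auth & rsp_auth) & SMP_AUTH_BONDING);
}

int main(void)
{
	assert(keys_persistent(SMP_AUTH_BONDING, SMP_AUTH_BONDING));
	assert(!keys_persistent(SMP_AUTH_BONDING, 0));
	return 0;
}
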
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f315c8d..fe12966 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -74,7 +74,7 @@
 
 static void sco_sock_timeout(unsigned long arg)
 {
-	struct sock *sk = (struct sock *) arg;
+	struct sock *sk = (struct sock *)arg;
 
 	BT_DBG("sock %p state %d", sk, sk->sk_state);
 
@@ -170,18 +170,21 @@
 	sco_conn_unlock(conn);
 
 	if (sk) {
+		sock_hold(sk);
 		bh_lock_sock(sk);
 		sco_sock_clear_timer(sk);
 		sco_chan_del(sk, err);
 		bh_unlock_sock(sk);
 		sco_sock_kill(sk);
+		sock_put(sk);
 	}
 
 	hcon->sco_data = NULL;
 	kfree(conn);
 }
 
-static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
+			   struct sock *parent)
 {
 	BT_DBG("conn %p", conn);
 
@@ -414,8 +417,10 @@
 		if (sco_pi(sk)->conn->hcon) {
 			sk->sk_state = BT_DISCONN;
 			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+			sco_conn_lock(sco_pi(sk)->conn);
 			hci_conn_drop(sco_pi(sk)->conn->hcon);
 			sco_pi(sk)->conn->hcon = NULL;
+			sco_conn_unlock(sco_pi(sk)->conn);
 		} else
 			sco_chan_del(sk, ECONNRESET);
 		break;
@@ -459,7 +464,8 @@
 	.obj_size	= sizeof(struct sco_pinfo)
 };
 
-static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
+static struct sock *sco_sock_alloc(struct net *net, struct socket *sock,
+				   int proto, gfp_t prio, int kern)
 {
 	struct sock *sk;
 
@@ -508,7 +514,8 @@
 	return 0;
 }
 
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
+			 int addr_len)
 {
 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
@@ -615,7 +622,8 @@
 	return err;
 }
 
-static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
+static int sco_sock_accept(struct socket *sock, struct socket *newsock,
+			   int flags)
 {
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct sock *sk = sock->sk, *ch;
@@ -669,7 +677,8 @@
 	return err;
 }
 
-static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
+static int sco_sock_getname(struct socket *sock, struct sockaddr *addr,
+			    int *len, int peer)
 {
 	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 	struct sock *sk = sock->sk;
@@ -779,7 +788,8 @@
 	return bt_sock_recvmsg(sock, msg, len, flags);
 }
 
-static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	int len, err = 0;
@@ -819,7 +829,7 @@
 		voice.setting = sco_pi(sk)->setting;
 
 		len = min_t(unsigned int, sizeof(voice), optlen);
-		if (copy_from_user((char *) &voice, optval, len)) {
+		if (copy_from_user((char *)&voice, optval, len)) {
 			err = -EFAULT;
 			break;
 		}
@@ -843,7 +853,8 @@
 	return err;
 }
 
-static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
+static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+				   char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
 	struct sco_options opts;
@@ -903,7 +914,8 @@
 	return err;
 }
 
-static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
 	int len, err = 0;
@@ -928,7 +940,7 @@
 		}
 
 		if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
-			     (u32 __user *) optval))
+			     (u32 __user *)optval))
 			err = -EFAULT;
 
 		break;
@@ -961,7 +973,9 @@
 	if (!sk)
 		return 0;
 
+	sock_hold(sk);
 	lock_sock(sk);
+
 	if (!sk->sk_shutdown) {
 		sk->sk_shutdown = SHUTDOWN_MASK;
 		sco_sock_clear_timer(sk);
@@ -972,7 +986,10 @@
 			err = bt_sock_wait_state(sk, BT_CLOSED,
 						 sk->sk_lingertime);
 	}
+
 	release_sock(sk);
+	sock_put(sk);
+
 	return err;
 }
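
sco_conn_del() above and sco_sock_shutdown() here both bracket their work with sock_hold()/sock_put(), so the socket cannot be freed out from under the function even if the calls in between drop what was previously the last reference. The hold-across-teardown pattern, as a small standalone refcount model (illustrative, not kernel code):

/* Minimal refcount model: pin an object across an operation that may
 * drop the last reference. */
#include <stdio.h>
#include <stdlib.h>

struct obj { int refcnt; };

static void obj_hold(struct obj *o) { o->refcnt++; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0) {
		printf("freeing object\n");
		free(o);
	}
}

static void teardown(struct obj *o)
{
	obj_hold(o);	/* pin for the duration of teardown */
	obj_put(o);	/* the work in between may drop the caller's ref */
	/* *o is still valid here thanks to the extra hold */
	obj_put(o);	/* release the pin; may be the final put */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcnt = 1;
	teardown(o);
	return 0;
}
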
 
@@ -1016,6 +1033,11 @@
 	} else {
 		sco_conn_lock(conn);
 
+		if (!conn->hcon) {
+			sco_conn_unlock(conn);
+			return;
+		}
+
 		parent = sco_get_sock_listen(&conn->hcon->src);
 		if (!parent) {
 			sco_conn_unlock(conn);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 25644e1..c913538 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -811,7 +811,6 @@
 		smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
 			     &reason);
 
-	clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
 	mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE);
 
 	if (chan->data)
@@ -1046,8 +1045,24 @@
 	struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
 	bool persistent;
 
+	if (hcon->type == ACL_LINK) {
+		if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
+			persistent = false;
+		else
+			persistent = !test_bit(HCI_CONN_FLUSH_KEY,
+					       &hcon->flags);
+	} else {
+		/* The LTKs, IRKs and CSRKs should be persistent only if
+		 * both sides had the bonding bit set in their
+		 * authentication requests.
+		 */
+		persistent = !!((req->auth_req & rsp->auth_req) &
+				SMP_AUTH_BONDING);
+	}
+
 	if (smp->remote_irk) {
-		mgmt_new_irk(hdev, smp->remote_irk);
+		mgmt_new_irk(hdev, smp->remote_irk, persistent);
+
 		/* Now that user space can be considered to know the
 		 * identity address track the connection based on it
 		 * from now on (assuming this is an LE link).
@@ -1075,21 +1090,6 @@
 		}
 	}
 
-	if (hcon->type == ACL_LINK) {
-		if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
-			persistent = false;
-		else
-			persistent = !test_bit(HCI_CONN_FLUSH_KEY,
-					       &hcon->flags);
-	} else {
-		/* The LTKs and CSRKs should be persistent only if both sides
-		 * had the bonding bit set in their authentication requests.
-		 */
-		persistent = !!((req->auth_req & rsp->auth_req) &
-				SMP_AUTH_BONDING);
-	}
-
-
 	if (smp->csrk) {
 		smp->csrk->bdaddr_type = hcon->dst_type;
 		bacpy(&smp->csrk->bdaddr, &hcon->dst);
@@ -2380,6 +2380,32 @@
 	return ret;
 }
 
+void smp_cancel_pairing(struct hci_conn *hcon)
+{
+	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_chan *chan;
+	struct smp_chan *smp;
+
+	if (!conn)
+		return;
+
+	chan = conn->smp;
+	if (!chan)
+		return;
+
+	l2cap_chan_lock(chan);
+
+	smp = chan->data;
+	if (smp) {
+		if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
+			smp_failure(conn, 0);
+		else
+			smp_failure(conn, SMP_UNSPECIFIED);
+	}
+
+	l2cap_chan_unlock(chan);
+}
+
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
 {
 	struct smp_cmd_encrypt_info *rp = (void *) skb->data;
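
smp_cancel_pairing() is the entry point the unpair path in mgmt.c uses to abort an in-flight pairing: it walks the conn -> SMP channel -> session pointer chain with an early return at every missing link, and fails the session only while holding the channel lock. A standalone sketch of that control flow (types and names are illustrative):

/* Sketch of the cancel-pairing control flow: bail out early on every
 * missing link, act only under the channel lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct smp_session { bool complete; };

struct chan {
	pthread_mutex_t lock;
	struct smp_session *smp;
};

struct conn { struct chan *smp_chan; };

static void fail_session(struct smp_session *s, int reason)
{
	printf("failing pairing, reason %d\n", reason);
}

static void cancel_pairing(struct conn *conn)
{
	struct chan *chan;

	if (!conn)
		return;
	chan = conn->smp_chan;
	if (!chan)
		return;

	pthread_mutex_lock(&chan->lock);
	if (chan->smp)
		fail_session(chan->smp, chan->smp->complete ? 0 : -1);
	pthread_mutex_unlock(&chan->lock);
}

int main(void)
{
	cancel_pairing(NULL);	/* no connection: silently does nothing */
	return 0;
}
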
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 6cf8725..ffcc70b 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -180,6 +180,7 @@
 };
 
 /* SMP Commands */
+void smp_cancel_pairing(struct hci_conn *hcon);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
 			     enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2f81624..5e88d3e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -56,7 +56,7 @@
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
 		goto out;
 
 	if (is_broadcast_ether_addr(dest))
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 6663cc0..a642bb8 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -133,15 +133,16 @@
 
 static void fdb_del_external_learn(struct net_bridge_fdb_entry *f)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = f->addr.addr,
-			.vid = f->vlan_id,
+	struct switchdev_obj_port_fdb fdb = {
+		.obj = {
+			.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+			.flags = SWITCHDEV_F_DEFER,
 		},
+		.vid = f->vlan_id,
 	};
 
-	switchdev_port_obj_del(f->dst->dev, &obj);
+	ether_addr_copy(fdb.addr, f->addr.addr);
+	switchdev_port_obj_del(f->dst->dev, &fdb.obj);
 }
 
 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
@@ -163,22 +164,27 @@
 			     struct net_bridge_fdb_entry *f)
 {
 	const unsigned char *addr = f->addr.addr;
-	u16 vid = f->vlan_id;
+	struct net_bridge_vlan_group *vg;
+	const struct net_bridge_vlan *v;
 	struct net_bridge_port *op;
+	u16 vid = f->vlan_id;
 
 	/* Maybe another port has same hw addr? */
 	list_for_each_entry(op, &br->port_list, list) {
+		vg = nbp_vlan_group(op);
 		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
-		    (!vid || nbp_vlan_find(op, vid))) {
+		    (!vid || br_vlan_find(vg, vid))) {
 			f->dst = op;
 			f->added_by_user = 0;
 			return;
 		}
 	}
 
+	vg = br_vlan_group(br);
+	v = br_vlan_find(vg, vid);
 	/* Maybe bridge device has same hw addr? */
 	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
-	    (!vid || br_vlan_find(br, vid))) {
+	    (!vid || (v && br_vlan_should_use(v)))) {
 		f->dst = NULL;
 		f->added_by_user = 0;
 		return;
@@ -203,14 +209,14 @@
 
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
-	struct net_port_vlans *pv = nbp_get_vlan_info(p);
-	bool no_vlan = !pv;
+	struct net_bridge_vlan *v;
 	int i;
-	u16 vid;
 
 	spin_lock_bh(&br->hash_lock);
 
+	vg = nbp_vlan_group(p);
 	/* Search all chains since old address/hash is unknown */
 	for (i = 0; i < BR_HASH_SIZE; i++) {
 		struct hlist_node *h;
@@ -226,7 +232,7 @@
 				 * configured, we can safely be done at
 				 * this point.
 				 */
-				if (no_vlan)
+				if (!vg || !vg->num_vlans)
 					goto insert;
 			}
 		}
@@ -236,15 +242,15 @@
 	/* insert new address,  may fail if invalid address or dup. */
 	fdb_insert(br, p, newaddr, 0);
 
-	if (no_vlan)
+	if (!vg || !vg->num_vlans)
 		goto done;
 
 	/* Now add entries for every VLAN configured on the port.
 	 * This function runs under RTNL so the vlan list will not change
 	 * from under us.
 	 */
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-		fdb_insert(br, p, newaddr, vid);
+	list_for_each_entry(v, &vg->vlan_list, vlist)
+		fdb_insert(br, p, newaddr, v->vid);
 
 done:
 	spin_unlock_bh(&br->hash_lock);
@@ -252,9 +258,9 @@
 
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge_fdb_entry *f;
-	struct net_port_vlans *pv;
-	u16 vid = 0;
+	struct net_bridge_vlan *v;
 
 	spin_lock_bh(&br->hash_lock);
 
@@ -264,20 +270,18 @@
 		fdb_delete_local(br, NULL, f);
 
 	fdb_insert(br, NULL, newaddr, 0);
-
+	vg = br_vlan_group(br);
+	if (!vg || !vg->num_vlans)
+		goto out;
 	/* Now remove and add entries for every VLAN configured on the
 	 * bridge.  This function runs under RTNL so the vlan list will not
 	 * change from under us.
 	 */
-	pv = br_get_vlan_info(br);
-	if (!pv)
-		goto out;
-
-	for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
-		f = __br_fdb_get(br, br->dev->dev_addr, vid);
+	list_for_each_entry(v, &vg->vlan_list, vlist) {
+		f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
 		if (f && f->is_local && !f->dst)
 			fdb_delete_local(br, NULL, f);
-		fdb_insert(br, NULL, newaddr, vid);
+		fdb_insert(br, NULL, newaddr, v->vid);
 	}
 out:
 	spin_unlock_bh(&br->hash_lock);
@@ -491,7 +495,9 @@
 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 					       struct net_bridge_port *source,
 					       const unsigned char *addr,
-					       __u16 vid)
+					       __u16 vid,
+					       unsigned char is_local,
+					       unsigned char is_static)
 {
 	struct net_bridge_fdb_entry *fdb;
 
@@ -500,8 +506,8 @@
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
 		fdb->vlan_id = vid;
-		fdb->is_local = 0;
-		fdb->is_static = 0;
+		fdb->is_local = is_local;
+		fdb->is_static = is_static;
 		fdb->added_by_user = 0;
 		fdb->added_by_external_learn = 0;
 		fdb->updated = fdb->used = jiffies;
@@ -532,11 +538,10 @@
 		fdb_delete(br, fdb);
 	}
 
-	fdb = fdb_create(head, source, addr, vid);
+	fdb = fdb_create(head, source, addr, vid, 1, 1);
 	if (!fdb)
 		return -ENOMEM;
 
-	fdb->is_local = fdb->is_static = 1;
 	fdb_add_hw_addr(br, addr);
 	fdb_notify(br, fdb, RTM_NEWNEIGH);
 	return 0;
@@ -593,7 +598,7 @@
 	} else {
 		spin_lock(&br->hash_lock);
 		if (likely(!fdb_find(head, addr, vid))) {
-			fdb = fdb_create(head, source, addr, vid);
+			fdb = fdb_create(head, source, addr, vid, 0, 0);
 			if (fdb) {
 				if (unlikely(added_by_user))
 					fdb->added_by_user = 1;
@@ -607,13 +612,14 @@
 	}
 }
 
-static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
+static int fdb_to_nud(const struct net_bridge *br,
+		      const struct net_bridge_fdb_entry *fdb)
 {
 	if (fdb->is_local)
 		return NUD_PERMANENT;
 	else if (fdb->is_static)
 		return NUD_NOARP;
-	else if (has_expired(fdb->dst->br, fdb))
+	else if (has_expired(br, fdb))
 		return NUD_STALE;
 	else
 		return NUD_REACHABLE;
@@ -639,7 +645,7 @@
 	ndm->ndm_flags	 = fdb->added_by_external_learn ? NTF_EXT_LEARNED : 0;
 	ndm->ndm_type	 = 0;
 	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
-	ndm->ndm_state   = fdb_to_nud(fdb);
+	ndm->ndm_state   = fdb_to_nud(br, fdb);
 
 	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
 		goto nla_put_failure;
@@ -769,7 +775,7 @@
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
-		fdb = fdb_create(head, source, addr, vid);
+		fdb = fdb_create(head, source, addr, vid, 0, 0);
 		if (!fdb)
 			return -ENOMEM;
 
@@ -784,7 +790,7 @@
 		}
 	}
 
-	if (fdb_to_nud(fdb) != state) {
+	if (fdb_to_nud(br, fdb) != state) {
 		if (state & NUD_PERMANENT) {
 			fdb->is_local = 1;
 			if (!fdb->is_static) {
@@ -844,9 +850,11 @@
 	       struct net_device *dev,
 	       const unsigned char *addr, u16 vid, u16 nlh_flags)
 {
-	struct net_bridge_port *p;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
 	int err = 0;
-	struct net_port_vlans *pv;
 
 	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
 		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -858,34 +866,51 @@
 		return -EINVAL;
 	}
 
-	p = br_port_get_rtnl(dev);
-	if (p == NULL) {
-		pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
-			dev->name);
-		return -EINVAL;
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
 	}
 
-	pv = nbp_get_vlan_info(p);
 	if (vid) {
-		if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
-			pr_info("bridge: RTM_NEWNEIGH with unconfigured "
-				"vlan %d on port %s\n", vid, dev->name);
+		v = br_vlan_find(vg, vid);
+		if (!v || !br_vlan_should_use(v)) {
+			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
 			return -EINVAL;
 		}
 
 		/* VID was specified, so use it. */
-		err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = br_fdb_insert(br, NULL, addr, vid);
+		else
+			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
 	} else {
-		err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
-		if (err || !pv)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = br_fdb_insert(br, NULL, addr, 0);
+		else
+			err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
+		if (err || !vg || !vg->num_vlans)
 			goto out;
 
 		/* We have vlans configured on this port and user didn't
 		 * specify a VLAN.  To be nice, add/update entry for every
 		 * vlan on this port.
 		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			if (dev->priv_flags & IFF_EBRIDGE)
+				err = br_fdb_insert(br, NULL, addr, v->vid);
+			else
+				err = __br_fdb_add(ndm, p, addr, nlh_flags,
+						   v->vid);
 			if (err)
 				goto out;
 		}
@@ -895,6 +920,32 @@
 	return err;
 }
 
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
+			      u16 vid)
+{
+	struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr, vid);
+	if (!fdb)
+		return -ENOENT;
+
+	fdb_delete(br, fdb);
+	return 0;
+}
+
+static int __br_fdb_delete_by_addr(struct net_bridge *br,
+				   const unsigned char *addr, u16 vid)
+{
+	int err;
+
+	spin_lock_bh(&br->hash_lock);
+	err = fdb_delete_by_addr(br, addr, vid);
+	spin_unlock_bh(&br->hash_lock);
+
+	return err;
+}
+
 static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
 				       const u8 *addr, u16 vlan)
 {
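
__br_fdb_delete_by_addr() follows the usual locked/unlocked helper split: fdb_delete_by_addr() assumes hash_lock is already held, and the wrapper takes and releases the lock around it, so callers already inside the lock can use the inner helper directly. Modeled standalone with a pthread mutex (the kernel uses a BH-disabling spinlock):

/* Locked/unlocked helper split; illustrative only. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int entries = 1;

/* caller must hold table_lock */
static int __delete_entry(void)
{
	if (!entries)
		return -1;	/* -ENOENT in the kernel */
	entries--;
	return 0;
}

static int delete_entry(void)
{
	int err;

	pthread_mutex_lock(&table_lock);
	err = __delete_entry();
	pthread_mutex_unlock(&table_lock);
	return err;
}

int main(void)
{
	printf("first delete:  %d\n", delete_entry());
	printf("second delete: %d\n", delete_entry());
	return 0;
}
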
@@ -927,38 +978,53 @@
 		  struct net_device *dev,
 		  const unsigned char *addr, u16 vid)
 {
-	struct net_bridge_port *p;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan *v;
+	struct net_bridge *br = NULL;
 	int err;
-	struct net_port_vlans *pv;
 
-	p = br_port_get_rtnl(dev);
-	if (p == NULL) {
-		pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
-			dev->name);
-		return -EINVAL;
+	if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group(br);
+	} else {
+		p = br_port_get_rtnl(dev);
+		if (!p) {
+			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+				dev->name);
+			return -EINVAL;
+		}
+		vg = nbp_vlan_group(p);
 	}
 
-	pv = nbp_get_vlan_info(p);
 	if (vid) {
-		if (!pv || !test_bit(vid, pv->vlan_bitmap)) {
-			pr_info("bridge: RTM_DELNEIGH with unconfigured "
-				"vlan %d on port %s\n", vid, dev->name);
+		v = br_vlan_find(vg, vid);
+		if (!v) {
+			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
 			return -EINVAL;
 		}
 
-		err = __br_fdb_delete(p, addr, vid);
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = __br_fdb_delete_by_addr(br, addr, vid);
+		else
+			err = __br_fdb_delete(p, addr, vid);
 	} else {
 		err = -ENOENT;
-		err &= __br_fdb_delete(p, addr, 0);
-		if (!pv)
+		if (dev->priv_flags & IFF_EBRIDGE)
+			err = __br_fdb_delete_by_addr(br, addr, 0);
+		else
+			err &= __br_fdb_delete(p, addr, 0);
+
+		if (!vg || !vg->num_vlans)
 			goto out;
 
-		/* We have vlans configured on this port and user didn't
-		 * specify a VLAN.  To be nice, add/update entry for every
-		 * vlan on this port.
-		 */
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err &= __br_fdb_delete(p, addr, vid);
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			if (!br_vlan_should_use(v))
+				continue;
+			if (dev->priv_flags & IFF_EBRIDGE)
+				err = __br_fdb_delete_by_addr(br, addr, v->vid);
+			else
+				err &= __br_fdb_delete(p, addr, v->vid);
 		}
 	}
 out:
@@ -1034,7 +1100,7 @@
 	head = &br->hash[br_mac_hash(addr, vid)];
 	fdb = fdb_find(head, addr, vid);
 	if (!fdb) {
-		fdb = fdb_create(head, p, addr, vid);
+		fdb = fdb_create(head, p, addr, vid, 0, 0);
 		if (!fdb) {
 			err = -ENOMEM;
 			goto err_unlock;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 48afca7..fcdb86d 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -30,9 +30,11 @@
 static inline int should_deliver(const struct net_bridge_port *p,
 				 const struct sk_buff *skb)
 {
+	struct net_bridge_vlan_group *vg;
+
+	vg = nbp_vlan_group_rcu(p);
 	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
-		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
-		p->state == BR_STATE_FORWARDING;
+		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
 }
 
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -76,7 +78,10 @@
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	struct net_bridge_vlan_group *vg;
+
+	vg = nbp_vlan_group_rcu(to);
+	skb = br_handle_vlan(to->br, vg, skb);
 	if (!skb)
 		return;
 
@@ -99,6 +104,7 @@
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_device *indev;
 
 	if (skb_warn_if_lro(skb)) {
@@ -106,7 +112,8 @@
 		return;
 	}
 
-	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	vg = nbp_vlan_group_rcu(to);
+	skb = br_handle_vlan(to->br, vg, skb);
 	if (!skb)
 		return;
 
@@ -134,7 +141,7 @@
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
 {
-	if (should_deliver(to, skb)) {
+	if (to && should_deliver(to, skb)) {
 		if (skb0)
 			deliver_clone(to, skb, __br_forward);
 		else
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 45e4757..ec02f586 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <linux/if_vlan.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 
@@ -250,6 +251,8 @@
 
 	nbp_vlan_flush(p);
 	br_fdb_delete_by_port(br, p, 0, 1);
+	switchdev_deferred_process();
+
 	nbp_update_port_count(br);
 
 	netdev_upper_dev_unlink(dev, br->dev);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 223f404..f7fba74 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -36,28 +36,28 @@
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
 	struct net_bridge *br = netdev_priv(brdev);
+	struct net_bridge_vlan_group *vg;
 	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
-	struct net_port_vlans *pv;
 
 	u64_stats_update_begin(&brstats->syncp);
 	brstats->rx_packets++;
 	brstats->rx_bytes += skb->len;
 	u64_stats_update_end(&brstats->syncp);
 
+	vg = br_vlan_group_rcu(br);
 	/* Bridge is just like any other port.  Make sure the
 	 * packet is allowed except in promisc modue when someone
 	 * may be running packet capture.
 	 */
-	pv = br_get_vlan_info(br);
 	if (!(brdev->flags & IFF_PROMISC) &&
-	    !br_allowed_egress(br, pv, skb)) {
+	    !br_allowed_egress(vg, skb)) {
 		kfree_skb(skb);
 		return NET_RX_DROP;
 	}
 
 	indev = skb->dev;
 	skb->dev = brdev;
-	skb = br_handle_vlan(br, pv, skb);
+	skb = br_handle_vlan(br, vg, skb);
 	if (!skb)
 		return NET_RX_DROP;
 
@@ -140,7 +140,7 @@
 	if (!p || p->state == BR_STATE_DISABLED)
 		goto drop;
 
-	if (!br_allowed_ingress(p->br, nbp_get_vlan_info(p), skb, &vid))
+	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
 		goto out;
 
 	/* insert into forwarding database after filtering to avoid spoofing */
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 8d423bc..263b4de 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -200,8 +200,7 @@
 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
 			return -EPERM;
 
-		br->ageing_time = clock_t_to_jiffies(args[1]);
-		return 0;
+		return br_set_ageing_time(br, args[1]);
 
 	case BRCTL_GET_PORT_INFO:
 	{
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index d747275..cd8deea 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -464,11 +464,11 @@
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	unsigned short vid = VLAN_N_VID;
+	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 	struct net_bridge *br;
 	int err;
 
@@ -489,10 +489,10 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	pv = nbp_get_vlan_info(p);
-	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			entry->vid = vid;
+	vg = nbp_vlan_group(p);
+	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			entry->vid = v->vid;
 			err = __br_mdb_add(net, br, entry);
 			if (err)
 				break;
@@ -566,11 +566,11 @@
 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	unsigned short vid = VLAN_N_VID;
+	struct net_bridge_vlan_group *vg;
 	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 	struct net_bridge *br;
 	int err;
 
@@ -591,10 +591,10 @@
 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 		return -EINVAL;
 
-	pv = nbp_get_vlan_info(p);
-	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			entry->vid = vid;
+	vg = nbp_vlan_group(p);
+	if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+		list_for_each_entry(v, &vg->vlan_list, vlist) {
+			entry->vid = v->vid;
 			err = __br_mdb_del(br, entry);
 			if (!err)
 				__br_mdb_notify(dev, entry, RTM_DELMDB);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index e21e44c..7ddbe7e 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -111,7 +111,6 @@
 /* largest possible L2 header, see br_nf_dev_queue_xmit() */
 #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct brnf_frag_data {
 	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];
 	u8 encap_size;
@@ -121,7 +120,6 @@
 };
 
 static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
-#endif
 
 static void nf_bridge_info_free(struct sk_buff *skb)
 {
@@ -189,10 +187,9 @@
  * expected format
  */
 
-static int br_validate_ipv4(struct sk_buff *skb)
+static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 {
 	const struct iphdr *iph;
-	struct net_device *dev = skb->dev;
 	u32 len;
 
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
@@ -213,13 +210,13 @@
 
 	len = ntohs(iph->tot_len);
 	if (skb->len < len) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	} else if (len < (iph->ihl*4))
 		goto inhdr_error;
 
 	if (pskb_trim_rcsum(skb, len)) {
-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 
@@ -232,7 +229,7 @@
 	return 0;
 
 inhdr_error:
-	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+	IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -497,7 +494,7 @@
 
 	nf_bridge_pull_encap_header_rcsum(skb);
 
-	if (br_validate_ipv4(skb))
+	if (br_validate_ipv4(state->net, skb))
 		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
@@ -609,13 +606,13 @@
 	}
 
 	if (pf == NFPROTO_IPV4) {
-		if (br_validate_ipv4(skb))
+		if (br_validate_ipv4(state->net, skb))
 			return NF_DROP;
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
 	}
 
 	if (pf == NFPROTO_IPV6) {
-		if (br_validate_ipv6(skb))
+		if (br_validate_ipv6(state->net, skb))
 			return NF_DROP;
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
 	}
@@ -667,7 +664,6 @@
 	return NF_STOLEN;
 }
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct brnf_frag_data *data;
@@ -692,17 +688,10 @@
 	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(net, sk, skb);
 }
-static int br_nf_push_frag_xmit_sk(struct sock *sk, struct sk_buff *skb)
-{
-	struct net *net = dev_net(skb_dst(skb)->dev);
-	return br_nf_push_frag_xmit(net, sk, skb);
-}
-#endif
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
-		  int (*output)(struct sock *, struct sk_buff *))
+		  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	unsigned int mtu = ip_skb_dst_mtu(skb);
 	struct iphdr *iph = ip_hdr(skb);
@@ -715,9 +704,8 @@
 		return -EMSGSIZE;
 	}
 
-	return ip_do_fragment(sk, skb, output);
+	return ip_do_fragment(net, sk, skb, output);
 }
-#endif
 
 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
 {
@@ -740,14 +728,14 @@
 
 	nf_bridge = nf_bridge_info_get(skb);
 
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4)
 	/* This is wrong! We should preserve the original fragment
 	 * boundaries by preserving frag_list rather than refragmenting.
 	 */
-	if (skb->protocol == htons(ETH_P_IP)) {
+	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
+	    skb->protocol == htons(ETH_P_IP)) {
 		struct brnf_frag_data *data;
 
-		if (br_validate_ipv4(skb))
+		if (br_validate_ipv4(net, skb))
 			goto drop;
 
 		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -764,15 +752,14 @@
 		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
 						 data->size);
 
-		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit_sk);
+		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
 	}
-#endif
-#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
-	if (skb->protocol == htons(ETH_P_IPV6)) {
+	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
+	    skb->protocol == htons(ETH_P_IPV6)) {
 		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
 		struct brnf_frag_data *data;
 
-		if (br_validate_ipv6(skb))
+		if (br_validate_ipv6(net, skb))
 			goto drop;
 
 		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
@@ -787,12 +774,11 @@
 						 data->size);
 
 		if (v6ops)
-			return v6ops->fragment(sk, skb, br_nf_push_frag_xmit_sk);
+			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
 
 		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
-#endif
 	nf_bridge_info_free(skb);
 	return br_dev_queue_push_xmit(net, sk, skb);
  drop:
@@ -910,49 +896,42 @@
 static struct nf_hook_ops br_nf_ops[] __read_mostly = {
 	{
 		.hook = br_nf_pre_routing,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_PRE_ROUTING,
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
 		.hook = br_nf_local_in,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_LOCAL_IN,
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
 		.hook = br_nf_forward_ip,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_FORWARD,
 		.priority = NF_BR_PRI_BRNF - 1,
 	},
 	{
 		.hook = br_nf_forward_arp,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_FORWARD,
 		.priority = NF_BR_PRI_BRNF,
 	},
 	{
 		.hook = br_nf_post_routing,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_BRIDGE,
 		.hooknum = NF_BR_POST_ROUTING,
 		.priority = NF_BR_PRI_LAST,
 	},
 	{
 		.hook = ip_sabotage_in,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_IPV4,
 		.hooknum = NF_INET_PRE_ROUTING,
 		.priority = NF_IP_PRI_FIRST,
 	},
 	{
 		.hook = ip_sabotage_in,
-		.owner = THIS_MODULE,
 		.pf = NFPROTO_IPV6,
 		.hooknum = NF_INET_PRE_ROUTING,
 		.priority = NF_IP6_PRI_FIRST,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index c51cc3f..d61f56e 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -100,10 +100,9 @@
 	return -1;
 }
 
-int br_validate_ipv6(struct sk_buff *skb)
+int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
-	struct net_device *dev = skb->dev;
 	struct inet6_dev *idev = __in6_dev_get(skb->dev);
 	u32 pkt_len;
 	u8 ip6h_len = sizeof(struct ipv6hdr);
@@ -123,12 +122,12 @@
 
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + ip6h_len > skb->len) {
-			IP6_INC_STATS_BH(dev_net(dev), idev,
+			IP6_INC_STATS_BH(net, idev,
 					 IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-			IP6_INC_STATS_BH(dev_net(dev), idev,
+			IP6_INC_STATS_BH(net, idev,
 					 IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
@@ -143,7 +142,7 @@
 	return 0;
 
 inhdr_error:
-	IP6_INC_STATS_BH(dev_net(dev), idev, IPSTATS_MIB_INHDRERRORS);
+	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 	return -1;
 }
@@ -224,7 +223,7 @@
 {
 	struct nf_bridge_info *nf_bridge;
 
-	if (br_validate_ipv6(skb))
+	if (br_validate_ipv6(state->net, skb))
 		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ea748c9..40197ff 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -21,36 +21,35 @@
 #include "br_private.h"
 #include "br_private_stp.h"
 
-static int br_get_num_vlan_infos(const struct net_port_vlans *pv,
-				 u32 filter_mask)
+static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+				u32 filter_mask)
 {
-	u16 vid_range_start = 0, vid_range_end = 0;
-	u16 vid_range_flags = 0;
-	u16 pvid, vid, flags;
+	struct net_bridge_vlan *v;
+	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+	u16 flags, pvid;
 	int num_vlans = 0;
 
-	if (filter_mask & RTEXT_FILTER_BRVLAN)
-		return pv->num_vlans;
-
 	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
 		return 0;
 
-	/* Count number of vlan info's
-	 */
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+	pvid = br_get_pvid(vg);
+	/* Count number of vlan infos */
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
 		flags = 0;
-		if (vid == pvid)
+		/* only a context, bridge vlan not activated */
+		if (!br_vlan_should_use(v))
+			continue;
+		if (v->vid == pvid)
 			flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (vid_range_start == 0) {
 			goto initvars;
-		} else if ((vid - vid_range_end) == 1 &&
+		} else if ((v->vid - vid_range_end) == 1 &&
 			flags == vid_range_flags) {
-			vid_range_end = vid;
+			vid_range_end = v->vid;
 			continue;
 		} else {
 			if ((vid_range_end - vid_range_start) > 0)
@@ -59,8 +58,8 @@
 				num_vlans += 1;
 		}
 initvars:
-		vid_range_start = vid;
-		vid_range_end = vid;
+		vid_range_start = v->vid;
+		vid_range_end = v->vid;
 		vid_range_flags = flags;
 	}
 
@@ -74,28 +73,43 @@
 	return num_vlans;
 }
 
+static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
+				 u32 filter_mask)
+{
+	int num_vlans;
+
+	if (!vg)
+		return 0;
+
+	if (filter_mask & RTEXT_FILTER_BRVLAN)
+		return vg->num_vlans;
+
+	rcu_read_lock();
+	num_vlans = __get_num_vlan_infos(vg, filter_mask);
+	rcu_read_unlock();
+
+	return num_vlans;
+}
+
 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
 					   u32 filter_mask)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan_group *vg = NULL;
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	int num_vlan_infos;
 
 	rcu_read_lock();
-	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rcu(dev));
-	else if (dev->priv_flags & IFF_EBRIDGE)
-		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
-	else
-		pv = NULL;
-	if (pv)
-		num_vlan_infos = br_get_num_vlan_infos(pv, filter_mask);
-	else
-		num_vlan_infos = 0;
+	if (br_port_exists(dev)) {
+		p = br_port_get_rcu(dev);
+		vg = nbp_vlan_group_rcu(p);
+	} else if (dev->priv_flags & IFF_EBRIDGE) {
+		br = netdev_priv(dev);
+		vg = br_vlan_group_rcu(br);
+	}
+	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
 	rcu_read_unlock();
 
-	if (!num_vlan_infos)
-		return 0;
-
 	/* Each VLAN is returned in bridge_vlan_info along with flags */
 	return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
 }
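
br_get_num_vlan_infos() reads the cached vg->num_vlans directly for the plain RTEXT_FILTER_BRVLAN case, but wraps the compressed walk in rcu_read_lock()/rcu_read_unlock() because __get_num_vlan_infos() traverses vlan_list with list_for_each_entry_rcu(). That split between a cheap cached read and a walk under the reader-side lock, sketched with a pthread rwlock standing in for RCU (illustrative only):

/* Reader-side discipline sketch: a cached count is read directly,
 * while walking the entries takes the read lock. */
#include <pthread.h>
#include <stdio.h>

struct vlan_group {
	pthread_rwlock_t lock;
	int num_vlans;		/* cached count, updated by writers */
	int vids[16];		/* toy stand-in for the vlan list */
};

static int count_filtered(struct vlan_group *vg, int (*keep)(int vid))
{
	int i, n = 0;

	pthread_rwlock_rdlock(&vg->lock);	/* rcu_read_lock() analogue */
	for (i = 0; i < vg->num_vlans; i++)
		if (keep(vg->vids[i]))
			n++;
	pthread_rwlock_unlock(&vg->lock);	/* rcu_read_unlock() */
	return n;
}

static int keep_even(int vid) { return !(vid & 1); }

int main(void)
{
	struct vlan_group vg = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.num_vlans = 4,
		.vids = { 1, 2, 3, 4 },
	};

	printf("filtered count: %d\n", count_filtered(&vg, keep_even));
	printf("plain count:    %d\n", vg.num_vlans);	/* no walk, no lock */
	return 0;
}
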
@@ -113,6 +127,20 @@
 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
 		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
+		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
+		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
+		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_MESSAGE_AGE_TIMER */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_FORWARD_DELAY_TIMER */
+		+ nla_total_size(sizeof(u64))	/* IFLA_BRPORT_HOLD_TIMER */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
+#endif
 		+ 0;
 }
 
@@ -134,6 +162,7 @@
 			      const struct net_bridge_port *p)
 {
 	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
+	u64 timerval;
 
 	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
 	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
@@ -146,9 +175,36 @@
 	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
 	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
-		       !!(p->flags & BR_PROXYARP_WIFI)))
+		       !!(p->flags & BR_PROXYARP_WIFI)) ||
+	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
+		    &p->designated_root) ||
+	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+		    &p->designated_bridge) ||
+	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
+	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
+	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
+	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
+	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
+		       p->topology_change_ack) ||
+	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
 		return -EMSGSIZE;
 
+	timerval = br_timer_value(&p->message_age_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval))
+		return -EMSGSIZE;
+	timerval = br_timer_value(&p->forward_delay_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval))
+		return -EMSGSIZE;
+	timerval = br_timer_value(&p->hold_timer);
+	if (nla_put_u64(skb, IFLA_BRPORT_HOLD_TIMER, timerval))
+		return -EMSGSIZE;
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
+		       p->multicast_router))
+		return -EMSGSIZE;
+#endif
+
 	return 0;
 }
 
@@ -185,31 +241,33 @@
 }
 
 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
-					 const struct net_port_vlans *pv)
+					 struct net_bridge_vlan_group *vg)
 {
-	u16 vid_range_start = 0, vid_range_end = 0;
-	u16 vid_range_flags = 0;
-	u16 pvid, vid, flags;
+	struct net_bridge_vlan *v;
+	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
+	u16 flags, pvid;
 	int err = 0;
 
 	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
 	 * and mark vlan info with begin and end flags
 	 * if vlaninfo represents a range
 	 */
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+	pvid = br_get_pvid(vg);
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
 		flags = 0;
-		if (vid == pvid)
+		if (!br_vlan_should_use(v))
+			continue;
+		if (v->vid == pvid)
 			flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (vid_range_start == 0) {
 			goto initvars;
-		} else if ((vid - vid_range_end) == 1 &&
+		} else if ((v->vid - vid_range_end) == 1 &&
 			flags == vid_range_flags) {
-			vid_range_end = vid;
+			vid_range_end = v->vid;
 			continue;
 		} else {
 			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
@@ -220,8 +278,8 @@
 		}
 
 initvars:
-		vid_range_start = vid;
-		vid_range_end = vid;
+		vid_range_start = v->vid;
+		vid_range_end = v->vid;
 		vid_range_flags = flags;
 	}
 
@@ -238,19 +296,23 @@
 }
 
 static int br_fill_ifvlaninfo(struct sk_buff *skb,
-			      const struct net_port_vlans *pv)
+			      struct net_bridge_vlan_group *vg)
 {
 	struct bridge_vlan_info vinfo;
-	u16 pvid, vid;
+	struct net_bridge_vlan *v;
+	u16 pvid;
 
-	pvid = br_get_pvid(pv);
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-		vinfo.vid = vid;
+	pvid = br_get_pvid(vg);
+	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+		if (!br_vlan_should_use(v))
+			continue;
+
+		vinfo.vid = v->vid;
 		vinfo.flags = 0;
-		if (vid == pvid)
+		if (v->vid == pvid)
 			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
 
-		if (test_bit(vid, pv->untagged_bitmap))
+		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -269,11 +331,11 @@
  * Contains port and master info as well as carrier and bridge state.
  */
 static int br_fill_ifinfo(struct sk_buff *skb,
-			  const struct net_bridge_port *port,
+			  struct net_bridge_port *port,
 			  u32 pid, u32 seq, int event, unsigned int flags,
 			  u32 filter_mask, const struct net_device *dev)
 {
-	const struct net_bridge *br;
+	struct net_bridge *br;
 	struct ifinfomsg *hdr;
 	struct nlmsghdr *nlh;
 	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
@@ -320,26 +382,31 @@
 	/* Check if  the VID information is requested */
 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
-		const struct net_port_vlans *pv;
+		struct net_bridge_vlan_group *vg;
 		struct nlattr *af;
 		int err;
 
+		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
+		rcu_read_lock();
 		if (port)
-			pv = nbp_get_vlan_info(port);
+			vg = nbp_vlan_group_rcu(port);
 		else
-			pv = br_get_vlan_info(br);
+			vg = br_vlan_group_rcu(br);
 
-		if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
+		if (!vg || !vg->num_vlans) {
+			rcu_read_unlock();
 			goto done;
-
+		}
 		af = nla_nest_start(skb, IFLA_AF_SPEC);
-		if (!af)
+		if (!af) {
+			rcu_read_unlock();
 			goto nla_put_failure;
-
+		}
 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
-			err = br_fill_ifvlaninfo_compressed(skb, pv);
+			err = br_fill_ifvlaninfo_compressed(skb, vg);
 		else
-			err = br_fill_ifvlaninfo(skb, pv);
+			err = br_fill_ifvlaninfo(skb, vg);
+		rcu_read_unlock();
 		if (err)
 			goto nla_put_failure;
 		nla_nest_end(skb, af);
@@ -413,14 +480,14 @@
 	switch (cmd) {
 	case RTM_SETLINK:
 		if (p) {
+			/* if the MASTER flag is set this will act on the global
+			 * per-VLAN entry as well
+			 */
 			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
 			if (err)
 				break;
-
-			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
-				err = br_vlan_add(p->br, vinfo->vid,
-						  vinfo->flags);
 		} else {
+			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
 		}
 		break;
@@ -462,6 +529,9 @@
 			if (vinfo_start)
 				return -EINVAL;
 			vinfo_start = vinfo;
+			/* don't allow range of pvids */
+			if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
+				return -EINVAL;
 			continue;
 		}
 
@@ -507,6 +577,7 @@
 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
 	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
 	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
+	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -578,6 +649,18 @@
 			return err;
 	}
 
+	if (tb[IFLA_BRPORT_FLUSH])
+		br_fdb_delete_by_port(p->br, p, 0, 0);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
+		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
+
+		err = br_multicast_set_port_router(p, mcast_router);
+		if (err)
+			return err;
+	}
+#endif
 	br_port_flags_change(p, old_flags ^ p->flags);
 	return 0;
 }
@@ -744,6 +827,27 @@
 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
 	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
 	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
+	[IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
+				 .len  = ETH_ALEN },
+	[IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
+	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
+	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
+	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
+	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
+	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -774,9 +878,9 @@
 	}
 
 	if (data[IFLA_BR_AGEING_TIME]) {
-		u32 ageing_time = nla_get_u32(data[IFLA_BR_AGEING_TIME]);
-
-		br->ageing_time = clock_t_to_jiffies(ageing_time);
+		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
+		if (err)
+			return err;
 	}
 
 	if (data[IFLA_BR_STP_STATE]) {
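
With this hunk both configuration paths (the BRCTL_SET_AGEING_TIME ioctl earlier in this series and netlink here) funnel through br_set_ageing_time(), which can reject bad values instead of silently storing whatever arrives. A hypothetical validated setter under that assumption; the real helper's bounds and conversion are not shown in this diff:

/* Hypothetical validated setter: reject out-of-range ageing times.
 * Bounds are assumptions for illustration. */
#include <stdio.h>

#define MIN_AGEING_SECS 10
#define MAX_AGEING_SECS 1000000

static long ageing_time;	/* stand-in for br->ageing_time */

static int set_ageing_time(long secs)
{
	if (secs < MIN_AGEING_SECS || secs > MAX_AGEING_SECS)
		return -34;	/* -ERANGE */
	ageing_time = secs;	/* the kernel converts clock_t to jiffies */
	return 0;
}

int main(void)
{
	printf("set 300: %d\n", set_ageing_time(300));
	printf("set 1:   %d\n", set_ageing_time(1));
	return 0;
}
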
@@ -807,6 +911,158 @@
 		if (err)
 			return err;
 	}
+
+	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+		err = __br_vlan_set_default_pvid(br, defpvid);
+		if (err)
+			return err;
+	}
+#endif
+
+	if (data[IFLA_BR_GROUP_FWD_MASK]) {
+		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
+
+		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
+			return -EINVAL;
+		br->group_fwd_mask = fwd_mask;
+	}
+
+	if (data[IFLA_BR_GROUP_ADDR]) {
+		u8 new_addr[ETH_ALEN];
+
+		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
+			return -EINVAL;
+		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
+		if (!is_link_local_ether_addr(new_addr))
+			return -EINVAL;
+		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
+		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
+		    new_addr[5] == 3)		/* 802.1X PAE address */
+			return -EINVAL;
+		spin_lock_bh(&br->lock);
+		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
+		spin_unlock_bh(&br->lock);
+		br->group_addr_set = true;
+		br_recalculate_fwd_mask(br);
+	}
+
+	if (data[IFLA_BR_FDB_FLUSH])
+		br_fdb_flush(br);
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (data[IFLA_BR_MCAST_ROUTER]) {
+		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
+
+		err = br_multicast_set_router(br, multicast_router);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_SNOOPING]) {
+		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
+
+		err = br_multicast_toggle(br, mcast_snooping);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
+		u8 val;
+
+		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
+		br->multicast_query_use_ifaddr = !!val;
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER]) {
+		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
+
+		err = br_multicast_set_querier(br, mcast_querier);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
+
+		br->hash_elasticity = val;
+	}
+
+	if (data[IFLA_BR_MCAST_HASH_MAX]) {
+		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
+
+		err = br_multicast_set_hash_max(br, hash_max);
+		if (err)
+			return err;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
+
+		br->multicast_last_member_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
+		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
+
+		br->multicast_startup_query_count = val;
+	}
+
+	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
+
+		br->multicast_last_member_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
+
+		br->multicast_membership_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
+
+		br->multicast_querier_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
+
+		br->multicast_query_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
+
+		br->multicast_query_response_interval = clock_t_to_jiffies(val);
+	}
+
+	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
+		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
+
+		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	}
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
+
+		br->nf_call_iptables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
+
+		br->nf_call_ip6tables = val ? true : false;
+	}
+
+	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
+		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
+
+		br->nf_call_arptables = val ? true : false;
+	}
 #endif
 
 	return 0;
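
Each optional attribute in the changelink handler above follows one idiom: test for presence, fetch the value with the nla_get_*() accessor matching the attribute's declared width, then either store it directly into the bridge or route it through a setter that can fail. A minimal sketch of that shape, with a hypothetical attribute and setter:

	if (data[IFLA_BR_EXAMPLE]) {			/* IFLA_BR_EXAMPLE is hypothetical */
		u32 val = nla_get_u32(data[IFLA_BR_EXAMPLE]);

		err = br_set_example(br, val);		/* hypothetical setter */
		if (err)
			return err;
	}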
@@ -823,6 +1079,40 @@
 	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_VLAN_DEFAULT_PVID */
+#endif
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_GROUP_FWD_MASK */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_ROOT_ID */
+	       nla_total_size(sizeof(struct ifla_bridge_id)) +   /* IFLA_BR_BRIDGE_ID */
+	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_ROOT_PORT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_ROOT_PATH_COST */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_HELLO_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TCN_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_GC_TIMER */
+	       nla_total_size(ETH_ALEN) +       /* IFLA_BR_GROUP_ADDR */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_ROUTER */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_SNOOPING */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_MCAST_QUERIER */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_ELASTICITY */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_HASH_MAX */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
+	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERIER_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
+	       nla_total_size(sizeof(u64)) +    /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IPTABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_IP6TABLES */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_NF_CALL_ARPTABLES */
 #endif
 	       0;
 }
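
Each nla_total_size() term above reserves room for one attribute: the 4-byte netlink attribute header plus the payload, rounded up to a 4-byte boundary. A quick worked check of the accounting:

	/* nla_total_size(payload) == NLA_ALIGN(NLA_HDRLEN + payload) */
	nla_total_size(sizeof(u8));	/* NLA_ALIGN(4 + 1) ==  8 bytes */
	nla_total_size(sizeof(u64));	/* NLA_ALIGN(4 + 8) == 12 bytes */
	nla_total_size(ETH_ALEN);	/* NLA_ALIGN(4 + 6) == 12 bytes */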
@@ -837,6 +1127,20 @@
 	u32 stp_enabled = br->stp_enabled;
 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
 	u8 vlan_enabled = br_vlan_enabled(br);
+	u64 clockval;
+
+	clockval = br_timer_value(&br->hello_timer);
+	if (nla_put_u64(skb, IFLA_BR_HELLO_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->tcn_timer);
+	if (nla_put_u64(skb, IFLA_BR_TCN_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->topology_change_timer);
+	if (nla_put_u64(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval))
+		return -EMSGSIZE;
+	clockval = br_timer_value(&br->gc_timer);
+	if (nla_put_u64(skb, IFLA_BR_GC_TIMER, clockval))
+		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
@@ -844,38 +1148,76 @@
 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
 	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
-	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
+	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
+	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
+	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
+		    &br->bridge_id) ||
+	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
+		    &br->designated_root) ||
+	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
+	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
+	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
+		       br->topology_change_detected) ||
+	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
 		return -EMSGSIZE;
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
+	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid))
+		return -EMSGSIZE;
+#endif
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
+		       br->multicast_query_use_ifaddr) ||
+	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
+			br->hash_elasticity) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
+			br->multicast_last_member_count) ||
+	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
+			br->multicast_startup_query_count))
+		return -EMSGSIZE;
+
+	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval))
+		return -EMSGSIZE;
+	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
+	if (nla_put_u64(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval))
+		return -EMSGSIZE;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
+		       br->nf_call_iptables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
+		       br->nf_call_ip6tables ? 1 : 0) ||
+	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
+		       br->nf_call_arptables ? 1 : 0))
 		return -EMSGSIZE;
 #endif
 
 	return 0;
 }
 
-static size_t br_get_link_af_size(const struct net_device *dev)
-{
-	struct net_port_vlans *pv;
-
-	if (br_port_exists(dev))
-		pv = nbp_get_vlan_info(br_port_get_rtnl(dev));
-	else if (dev->priv_flags & IFF_EBRIDGE)
-		pv = br_get_vlan_info((struct net_bridge *)netdev_priv(dev));
-	else
-		return 0;
-
-	if (!pv)
-		return 0;
-
-	/* Each VLAN is returned in bridge_vlan_info along with flags */
-	return pv->num_vlans * nla_total_size(sizeof(struct bridge_vlan_info));
-}
 
 static struct rtnl_af_ops br_af_ops __read_mostly = {
 	.family			= AF_BRIDGE,
-	.get_link_af_size	= br_get_link_af_size,
+	.get_link_af_size	= br_get_link_af_size_filtered,
 };
 
 struct rtnl_link_ops br_link_ops __read_mostly = {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74e99c7..216018c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -20,6 +20,7 @@
 #include <net/route.h>
 #include <net/ip6_fib.h>
 #include <linux/if_vlan.h>
+#include <linux/rhashtable.h>
 
 #define BR_HASH_BITS 8
 #define BR_HASH_SIZE (1 << BR_HASH_BITS)
@@ -28,7 +29,6 @@
 
 #define BR_PORT_BITS	10
 #define BR_MAX_PORTS	(1<<BR_PORT_BITS)
-#define BR_VLAN_BITMAP_LEN	BITS_TO_LONGS(VLAN_N_VID)
 
 #define BR_VERSION	"2.3"
 
@@ -77,17 +77,61 @@
 };
 #endif
 
-struct net_port_vlans {
-	u16				port_idx;
-	u16				pvid;
+/**
+ * struct net_bridge_vlan - per-vlan entry
+ *
+ * @vnode: rhashtable member
+ * @vid: VLAN id
+ * @flags: bridge vlan flags
+ * @br: if MASTER flag set, this points to a bridge struct
+ * @port: if MASTER flag unset, this points to a port struct
+ * @refcnt: if MASTER flag set, this is bumped for each port referencing it
+ * @brvlan: if MASTER flag unset, this points to the global per-VLAN context
+ *          for this VLAN entry
+ * @vlist: sorted list of VLAN entries
+ * @rcu: used for entry destruction
+ *
+ * This structure is shared between the global per-VLAN entries contained in
+ * the bridge rhashtable and the local per-port per-VLAN entries contained in
+ * the port's rhashtable. The union entries should be interpreted depending on
+ * the entry flags that are set.
+ */
+struct net_bridge_vlan {
+	struct rhash_head		vnode;
+	u16				vid;
+	u16				flags;
 	union {
-		struct net_bridge_port		*port;
-		struct net_bridge		*br;
-	}				parent;
+		struct net_bridge	*br;
+		struct net_bridge_port	*port;
+	};
+	union {
+		atomic_t		refcnt;
+		struct net_bridge_vlan	*brvlan;
+	};
+	struct list_head		vlist;
+
 	struct rcu_head			rcu;
-	unsigned long			vlan_bitmap[BR_VLAN_BITMAP_LEN];
-	unsigned long			untagged_bitmap[BR_VLAN_BITMAP_LEN];
+};
+
+/**
+ * struct net_bridge_vlan_group
+ *
+ * @vlan_hash: VLAN entry rhashtable
+ * @vlan_list: sorted VLAN entry list
+ * @num_vlans: number of total VLAN entries
+ * @pvid: PVID VLAN id
+ *
+ * IMPORTANT: Be careful when checking if there are VLAN entries using list
+ *            primitives because the bridge can have entries in its list which
+ *            are just for global context but not for filtering, i.e. they have
+ *            the master flag set but not the brentry flag. If you have to check
+ *            whether there are "real" entries in the bridge, test @num_vlans.
+ */
+struct net_bridge_vlan_group {
+	struct rhashtable		vlan_hash;
+	struct list_head		vlan_list;
 	u16				num_vlans;
+	u16				pvid;
 };
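
Given the warning above, a caller that wants to know whether the bridge has usable VLANs should consult @num_vlans rather than the list. A minimal sketch, with a hypothetical helper name:

	static inline bool br_vlan_group_has_entries(const struct net_bridge_vlan_group *vg)
	{
		/* context-only master entries still sit on vlan_list, so
		 * !list_empty() would give a false positive here
		 */
		return vg && vg->num_vlans;
	}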
 
 struct net_bridge_fdb_entry
@@ -185,7 +229,7 @@
 	struct netpoll			*np;
 #endif
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-	struct net_port_vlans __rcu	*vlan_info;
+	struct net_bridge_vlan_group	__rcu *vlgrp;
 #endif
 };
 
@@ -293,10 +337,10 @@
 	struct kobject			*ifobj;
 	u32				auto_cnt;
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	struct net_bridge_vlan_group	__rcu *vlgrp;
 	u8				vlan_enabled;
 	__be16				vlan_proto;
 	u16				default_pvid;
-	struct net_port_vlans __rcu	*vlan_info;
 #endif
 };
 
@@ -344,6 +388,31 @@
 	return !memcmp(&br->bridge_id, &br->designated_root, 8);
 }
 
+/* check if a VLAN entry is global */
+static inline bool br_vlan_is_master(const struct net_bridge_vlan *v)
+{
+	return v->flags & BRIDGE_VLAN_INFO_MASTER;
+}
+
+/* check if a VLAN entry is used by the bridge */
+static inline bool br_vlan_is_brentry(const struct net_bridge_vlan *v)
+{
+	return v->flags & BRIDGE_VLAN_INFO_BRENTRY;
+}
+
+/* check if we should use the vlan entry, returns false if it's only context */
+static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
+{
+	if (br_vlan_is_master(v)) {
+		if (br_vlan_is_brentry(v))
+			return true;
+		else
+			return false;
+	}
+
+	return true;
+}
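
Taken together, the three helpers above encode a small decision table over the per-entry flags:

	/*
	 *   MASTER  BRENTRY   entry meaning              should_use
	 *   unset   -         per-port vlan              true
	 *   set     set       bridge device vlan         true
	 *   set     unset     global context only        false
	 */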
+
 /* br_device.c */
 void br_dev_setup(struct net_device *dev);
 void br_dev_delete(struct net_device *dev, struct list_head *list);
@@ -601,18 +670,19 @@
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-			struct sk_buff *skb, u16 *vid);
-bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid);
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 		       const struct sk_buff *skb);
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
-			       const struct net_port_vlans *v,
+			       struct net_bridge_vlan_group *vg,
 			       struct sk_buff *skb);
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
 int br_vlan_delete(struct net_bridge *br, u16 vid);
 void br_vlan_flush(struct net_bridge *br);
-bool br_vlan_find(struct net_bridge *br, u16 vid);
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid);
 void br_recalculate_fwd_mask(struct net_bridge *br);
 int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
@@ -620,22 +690,35 @@
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
 void nbp_vlan_flush(struct net_bridge_port *port);
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
 int nbp_vlan_init(struct net_bridge_port *port);
+int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
 
-static inline struct net_port_vlans *br_get_vlan_info(
-						const struct net_bridge *br)
+static inline struct net_bridge_vlan_group *br_vlan_group(
+					const struct net_bridge *br)
 {
-	return rcu_dereference_rtnl(br->vlan_info);
+	return rtnl_dereference(br->vlgrp);
 }
 
-static inline struct net_port_vlans *nbp_get_vlan_info(
-						const struct net_bridge_port *p)
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+					const struct net_bridge_port *p)
 {
-	return rcu_dereference_rtnl(p->vlan_info);
+	return rtnl_dereference(p->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+					const struct net_bridge *br)
+{
+	return rcu_dereference(br->vlgrp);
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+					const struct net_bridge_port *p)
+{
+	return rcu_dereference(p->vlgrp);
 }
 
 /* Since bridge now depends on 8021Q module, but the time bridge sees the
@@ -645,9 +728,9 @@
 {
 	int err = 0;
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		*vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
-	else {
+	} else {
 		*vid = 0;
 		err = -EINVAL;
 	}
@@ -655,13 +738,13 @@
 	return err;
 }
 
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
-	if (!v)
+	if (!vg)
 		return 0;
 
 	smp_rmb();
-	return v->pvid;
+	return vg->pvid;
 }
 
 static inline int br_vlan_enabled(struct net_bridge *br)
@@ -669,16 +752,15 @@
 	return br->vlan_enabled;
 }
 #else
-static inline bool br_allowed_ingress(struct net_bridge *br,
-				      struct net_port_vlans *v,
+static inline bool br_allowed_ingress(const struct net_bridge *br,
+				      struct net_bridge_vlan_group *vg,
 				      struct sk_buff *skb,
 				      u16 *vid)
 {
 	return true;
 }
 
-static inline bool br_allowed_egress(struct net_bridge *br,
-				     const struct net_port_vlans *v,
+static inline bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 				     const struct sk_buff *skb)
 {
 	return true;
@@ -691,7 +773,7 @@
 }
 
 static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
-					     const struct net_port_vlans *v,
+					     struct net_bridge_vlan_group *vg,
 					     struct sk_buff *skb)
 {
 	return skb;
@@ -711,11 +793,6 @@
 {
 }
 
-static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
-{
-	return false;
-}
-
 static inline void br_recalculate_fwd_mask(struct net_bridge *br)
 {
 }
@@ -739,21 +816,11 @@
 {
 }
 
-static inline struct net_port_vlans *br_get_vlan_info(
-						const struct net_bridge *br)
+static inline struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg,
+						   u16 vid)
 {
 	return NULL;
 }
-static inline struct net_port_vlans *nbp_get_vlan_info(
-						const struct net_bridge_port *p)
-{
-	return NULL;
-}
-
-static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
-{
-	return false;
-}
 
 static inline int nbp_vlan_init(struct net_bridge_port *port)
 {
@@ -764,7 +831,8 @@
 {
 	return 0;
 }
-static inline u16 br_get_pvid(const struct net_port_vlans *v)
+
+static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
 {
 	return 0;
 }
@@ -779,6 +847,37 @@
 {
 	return -EOPNOTSUPP;
 }
+
+static inline int nbp_get_num_vlan_infos(struct net_bridge_port *p,
+					 u32 filter_mask)
+{
+	return 0;
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group(
+					const struct net_bridge *br)
+{
+	return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group(
+					const struct net_bridge_port *p)
+{
+	return NULL;
+}
+
+static inline struct net_bridge_vlan_group *br_vlan_group_rcu(
+					const struct net_bridge *br)
+{
+	return NULL;
+}
+
+static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+					const struct net_bridge_port *p)
+{
+	return NULL;
+}
+
 #endif
 
 struct nf_br_ops {
@@ -808,6 +907,7 @@
 int br_set_forward_delay(struct net_bridge *br, unsigned long x);
 int br_set_hello_time(struct net_bridge *br, unsigned long x);
 int br_set_max_age(struct net_bridge *br, unsigned long x);
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time);
 
 
 /* br_stp_if.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 3a7392e..80c34d7 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -40,14 +40,15 @@
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
+		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+		.flags = SWITCHDEV_F_DEFER,
 		.u.stp_state = state,
 	};
 	int err;
 
 	p->state = state;
 	err = switchdev_port_attr_set(p->dev, &attr);
-	if (err && err != -EOPNOTSUPP)
+	if (err)
 		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
 				(unsigned int) p->port_no, p->dev->name);
 }
@@ -566,6 +567,29 @@
 
 }
 
+int br_set_ageing_time(struct net_bridge *br, u32 ageing_time)
+{
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
+		.u.ageing_time = ageing_time,
+	};
+	unsigned long t = clock_t_to_jiffies(ageing_time);
+	int err;
+
+	if (t < BR_MIN_AGEING_TIME || t > BR_MAX_AGEING_TIME)
+		return -ERANGE;
+
+	err = switchdev_port_attr_set(br->dev, &attr);
+	if (err)
+		return err;
+
+	br->ageing_time = t;
+	mod_timer(&br->gc_timer, jiffies);
+
+	return 0;
+}
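
br_set_ageing_time() takes the value in clock_t (USER_HZ) units, converts it to jiffies for the range check and the software bridge, and offloads the value as given via switchdev; SWITCHDEV_F_SKIP_EOPNOTSUPP keeps software-only bridges working. A worked conversion, assuming the usual USER_HZ of 100 and a kernel HZ of 250:

	/* a 300 second ageing time arrives as 300 * USER_HZ == 30000 ticks;
	 * clock_t_to_jiffies(30000) == 30000 * (HZ / USER_HZ) == 75000 jiffies
	 */
	err = br_set_ageing_time(br, 300 * USER_HZ);	/* sketch only */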
+
 void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
 {
 	br->bridge_forward_delay = t;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 4ca449a..fa53d7a 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -15,6 +15,7 @@
 #include <linux/kmod.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
+#include <net/switchdev.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
@@ -35,11 +36,22 @@
 /* called under bridge lock */
 void br_init_port(struct net_bridge_port *p)
 {
+	struct switchdev_attr attr = {
+		.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
+		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
+		.u.ageing_time = p->br->ageing_time,
+	};
+	int err;
+
 	p->port_id = br_make_port_id(p->priority, p->port_no);
 	br_become_designated_port(p);
 	br_set_state(p, BR_STATE_BLOCKING);
 	p->topology_change_ack = 0;
 	p->config_pending = 0;
+
+	err = switchdev_port_attr_set(p->dev, &attr);
+	if (err)
+		netdev_err(p->dev, "failed to set HW ageing time\n");
 }
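
Both attribute flags matter here: br_init_port() runs under the bridge spin lock, so the switchdev operation must be deferred to process context, and ports without offload support must not be treated as an error.

	/* SWITCHDEV_F_DEFER           - execute later, outside the spin lock
	 * SWITCHDEV_F_SKIP_EOPNOTSUPP - software-only ports succeed silently
	 */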
 
 /* called under bridge lock */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 4c97fc5..8365bd5 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -102,8 +102,15 @@
 
 static int set_ageing_time(struct net_bridge *br, unsigned long val)
 {
-	br->ageing_time = clock_t_to_jiffies(val);
-	return 0;
+	int ret;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	ret = br_set_ageing_time(br, val);
+	rtnl_unlock();
+
+	return ret;
 }
 
 static ssize_t ageing_time_store(struct device *d,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 5f5a02b..5f0d0cc 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -6,86 +6,205 @@
 
 #include "br_private.h"
 
-static void __vlan_add_pvid(struct net_port_vlans *v, u16 vid)
+static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
+			      const void *ptr)
 {
-	if (v->pvid == vid)
+	const struct net_bridge_vlan *vle = ptr;
+	u16 vid = *(u16 *)arg->key;
+
+	return vle->vid != vid;
+}
+
+static const struct rhashtable_params br_vlan_rht_params = {
+	.head_offset = offsetof(struct net_bridge_vlan, vnode),
+	.key_offset = offsetof(struct net_bridge_vlan, vid),
+	.key_len = sizeof(u16),
+	.nelem_hint = 3,
+	.locks_mul = 1,
+	.max_size = VLAN_N_VID,
+	.obj_cmpfn = br_vlan_cmp,
+	.automatic_shrinking = true,
+};
+
+static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
+{
+	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
+}
+
+static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
+{
+	if (vg->pvid == vid)
 		return;
 
 	smp_wmb();
-	v->pvid = vid;
+	vg->pvid = vid;
 }
 
-static void __vlan_delete_pvid(struct net_port_vlans *v, u16 vid)
+static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	if (v->pvid != vid)
+	if (vg->pvid != vid)
 		return;
 
 	smp_wmb();
-	v->pvid = 0;
+	vg->pvid = 0;
 }
 
-static void __vlan_add_flags(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
 {
-	if (flags & BRIDGE_VLAN_INFO_PVID)
-		__vlan_add_pvid(v, vid);
+	struct net_bridge_vlan_group *vg;
+
+	if (br_vlan_is_master(v))
+		vg = br_vlan_group(v->br);
 	else
-		__vlan_delete_pvid(v, vid);
+		vg = nbp_vlan_group(v->port);
+
+	if (flags & BRIDGE_VLAN_INFO_PVID)
+		__vlan_add_pvid(vg, v->vid);
+	else
+		__vlan_delete_pvid(vg, v->vid);
 
 	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
-		set_bit(vid, v->untagged_bitmap);
+		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 	else
-		clear_bit(vid, v->untagged_bitmap);
+		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
 }
 
 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
 			  u16 vid, u16 flags)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct switchdev_obj_port_vlan v = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+		.flags = flags,
+		.vid_begin = vid,
+		.vid_end = vid,
+	};
 	int err;
 
-	/* If driver uses VLAN ndo ops, use 8021q to install vid
-	 * on device, otherwise try switchdev ops to install vid.
+	/* Try the switchdev op first. In case it is not supported, fall back
+	 * to 8021q add.
 	 */
-
-	if (ops->ndo_vlan_rx_add_vid) {
-		err = vlan_vid_add(dev, br->vlan_proto, vid);
-	} else {
-		struct switchdev_obj vlan_obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.u.vlan = {
-				.flags = flags,
-				.vid_begin = vid,
-				.vid_end = vid,
-			},
-		};
-
-		err = switchdev_port_obj_add(dev, &vlan_obj);
-		if (err == -EOPNOTSUPP)
-			err = 0;
-	}
-
+	err = switchdev_port_obj_add(dev, &v.obj);
+	if (err == -EOPNOTSUPP)
+		return vlan_vid_add(dev, br->vlan_proto, vid);
 	return err;
 }
 
-static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
+static void __vlan_add_list(struct net_bridge_vlan *v)
 {
-	struct net_bridge_port *p = NULL;
-	struct net_bridge *br;
-	struct net_device *dev;
+	struct net_bridge_vlan_group *vg;
+	struct list_head *headp, *hpos;
+	struct net_bridge_vlan *vent;
+
+	if (br_vlan_is_master(v))
+		vg = br_vlan_group(v->br);
+	else
+		vg = nbp_vlan_group(v->port);
+
+	headp = &vg->vlan_list;
+	list_for_each_prev(hpos, headp) {
+		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
+		if (v->vid < vent->vid)
+			continue;
+		else
+			break;
+	}
+	list_add_rcu(&v->vlist, hpos);
+}
+
+static void __vlan_del_list(struct net_bridge_vlan *v)
+{
+	list_del_rcu(&v->vlist);
+}
+
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+			  u16 vid)
+{
+	struct switchdev_obj_port_vlan v = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+		.vid_begin = vid,
+		.vid_end = vid,
+	};
 	int err;
 
-	if (test_bit(vid, v->vlan_bitmap)) {
-		__vlan_add_flags(v, vid, flags);
+	/* Try the switchdev op first. In case it is not supported, fall back
+	 * to 8021q del.
+	 */
+	err = switchdev_port_obj_del(dev, &v.obj);
+	if (err == -EOPNOTSUPP) {
+		vlan_vid_del(dev, br->vlan_proto, vid);
 		return 0;
 	}
+	return err;
+}
 
-	if (v->port_idx) {
-		p = v->parent.port;
+/* Returns the master vlan; if it didn't exist, it gets created. In all cases
+ * a reference is taken to the master vlan before returning.
+ */
+static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+{
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *masterv;
+
+	vg = br_vlan_group(br);
+	masterv = br_vlan_find(vg, vid);
+	if (!masterv) {
+		/* missing global ctx, create it now */
+		if (br_vlan_add(br, vid, 0))
+			return NULL;
+		masterv = br_vlan_find(vg, vid);
+		if (WARN_ON(!masterv))
+			return NULL;
+	}
+	atomic_inc(&masterv->refcnt);
+
+	return masterv;
+}
+
+static void br_vlan_put_master(struct net_bridge_vlan *masterv)
+{
+	struct net_bridge_vlan_group *vg;
+
+	if (!br_vlan_is_master(masterv))
+		return;
+
+	vg = br_vlan_group(masterv->br);
+	if (atomic_dec_and_test(&masterv->refcnt)) {
+		rhashtable_remove_fast(&vg->vlan_hash,
+				       &masterv->vnode, br_vlan_rht_params);
+		__vlan_del_list(masterv);
+		kfree_rcu(masterv, rcu);
+	}
+}
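
The two helpers above implement a create-on-demand refcount scheme: every per-port vlan pins the global context entry for its vid, and the last put removes that context from the bridge's hash and list. A sketch of the intended pairing (caller policy is hypothetical):

	masterv = br_vlan_get_master(br, vid);	/* creates the context if missing, takes a ref */
	if (!masterv)
		return -ENOMEM;
	v->brvlan = masterv;
	/* ... the context lives for as long as the port vlan does ... */
	br_vlan_put_master(masterv);		/* final put frees the context entry */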
+
+/* This is the shared VLAN add function which works for both ports and bridge
+ * devices. There are four possible calls to this function in terms of the
+ * vlan entry type:
+ * 1. vlan is being added on a port (no master flags, global entry exists)
+ * 2. vlan is being added on a bridge (both master and brvlan flags)
+ * 3. vlan is being added on a port, but no global entry existed, so one is
+ *    being created right now (master flag set, brvlan flag unset); the
+ *    global entry is used for global per-vlan features, but not for filtering
+ * 4. same as 3 but with both master and brvlan flags set so the entry
+ *    will be used for filtering in both the port and the bridge
+ */
+static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
+{
+	struct net_bridge_vlan *masterv = NULL;
+	struct net_bridge_port *p = NULL;
+	struct net_bridge_vlan_group *vg;
+	struct net_device *dev;
+	struct net_bridge *br;
+	int err;
+
+	if (br_vlan_is_master(v)) {
+		br = v->br;
+		dev = br->dev;
+		vg = br_vlan_group(br);
+	} else {
+		p = v->port;
 		br = p->br;
 		dev = p->dev;
-	} else {
-		br = v->parent.br;
-		dev = br->dev;
+		vg = nbp_vlan_group(p);
 	}
 
 	if (p) {
@@ -93,116 +212,140 @@
 		 * This ensures tagged traffic enters the bridge when
 		 * promiscuous mode is disabled by br_manage_promisc().
 		 */
-		err = __vlan_vid_add(dev, br, vid, flags);
+		err = __vlan_vid_add(dev, br, v->vid, flags);
 		if (err)
-			return err;
+			goto out;
+
+		/* need to work on the master vlan too */
+		if (flags & BRIDGE_VLAN_INFO_MASTER) {
+			err = br_vlan_add(br, v->vid, flags |
+						      BRIDGE_VLAN_INFO_BRENTRY);
+			if (err)
+				goto out_filt;
+		}
+
+		masterv = br_vlan_get_master(br, v->vid);
+		if (!masterv)
+			goto out_filt;
+		v->brvlan = masterv;
 	}
 
-	err = br_fdb_insert(br, p, dev->dev_addr, vid);
-	if (err) {
-		br_err(br, "failed insert local address into bridge "
-		       "forwarding table\n");
-		goto out_filt;
+	/* Add the dev mac and count the vlan only if it's usable */
+	if (br_vlan_should_use(v)) {
+		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
+		if (err) {
+			br_err(br, "failed to insert local address into bridge forwarding table\n");
+			goto out_filt;
+		}
+		vg->num_vlans++;
 	}
 
-	set_bit(vid, v->vlan_bitmap);
-	v->num_vlans++;
-	__vlan_add_flags(v, vid, flags);
+	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
+					    br_vlan_rht_params);
+	if (err)
+		goto out_fdb_insert;
 
-	return 0;
+	__vlan_add_list(v);
+	__vlan_add_flags(v, flags);
+out:
+	return err;
+
+out_fdb_insert:
+	if (br_vlan_should_use(v)) {
+		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
+		vg->num_vlans--;
+	}
 
 out_filt:
-	if (p)
-		vlan_vid_del(dev, br->vlan_proto, vid);
-	return err;
+	if (p) {
+		__vlan_vid_del(dev, br, v->vid);
+		if (masterv) {
+			br_vlan_put_master(masterv);
+			v->brvlan = NULL;
+		}
+	}
+
+	goto out;
 }
 
-static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
-			  u16 vid)
+static int __vlan_del(struct net_bridge_vlan *v)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_bridge_vlan *masterv = v;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_port *p = NULL;
 	int err = 0;
 
-	/* If driver uses VLAN ndo ops, use 8021q to delete vid
-	 * on device, otherwise try switchdev ops to delete vid.
-	 */
-
-	if (ops->ndo_vlan_rx_kill_vid) {
-		vlan_vid_del(dev, br->vlan_proto, vid);
+	if (br_vlan_is_master(v)) {
+		vg = br_vlan_group(v->br);
 	} else {
-		struct switchdev_obj vlan_obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.u.vlan = {
-				.vid_begin = vid,
-				.vid_end = vid,
-			},
-		};
-
-		err = switchdev_port_obj_del(dev, &vlan_obj);
-		if (err == -EOPNOTSUPP)
-			err = 0;
+		p = v->port;
+		vg = nbp_vlan_group(v->port);
+		masterv = v->brvlan;
 	}
 
+	__vlan_delete_pvid(vg, v->vid);
+	if (p) {
+		err = __vlan_vid_del(p->dev, p->br, v->vid);
+		if (err)
+			goto out;
+	}
+
+	if (br_vlan_should_use(v)) {
+		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
+		vg->num_vlans--;
+	}
+
+	if (masterv != v) {
+		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
+				       br_vlan_rht_params);
+		__vlan_del_list(v);
+		kfree_rcu(v, rcu);
+	}
+
+	br_vlan_put_master(masterv);
+out:
 	return err;
 }
 
-static int __vlan_del(struct net_port_vlans *v, u16 vid)
+static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 {
-	if (!test_bit(vid, v->vlan_bitmap))
-		return -EINVAL;
-
-	__vlan_delete_pvid(v, vid);
-	clear_bit(vid, v->untagged_bitmap);
-
-	if (v->port_idx) {
-		struct net_bridge_port *p = v->parent.port;
-		int err;
-
-		err = __vlan_vid_del(p->dev, p->br, vid);
-		if (err)
-			return err;
-	}
-
-	clear_bit(vid, v->vlan_bitmap);
-	v->num_vlans--;
-	if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
-		if (v->port_idx)
-			RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
-		else
-			RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
-		kfree_rcu(v, rcu);
-	}
-	return 0;
+	WARN_ON(!list_empty(&vg->vlan_list));
+	rhashtable_destroy(&vg->vlan_hash);
+	kfree(vg);
 }
 
-static void __vlan_flush(struct net_port_vlans *v)
+static void __vlan_flush(struct net_bridge_vlan_group *vg)
 {
-	smp_wmb();
-	v->pvid = 0;
-	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
-	if (v->port_idx)
-		RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
-	else
-		RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
-	kfree_rcu(v, rcu);
+	struct net_bridge_vlan *vlan, *tmp;
+
+	__vlan_delete_pvid(vg, vg->pvid);
+	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
+		__vlan_del(vlan);
 }
 
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
-			       const struct net_port_vlans *pv,
+			       struct net_bridge_vlan_group *vg,
 			       struct sk_buff *skb)
 {
+	struct net_bridge_vlan *v;
 	u16 vid;
 
 	/* If this packet was not filtered at input, let it pass */
 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 		goto out;
 
-	/* Vlan filter table must be configured at this point.  The
+	/* At this point, we know that the frame was filtered and contains
+	 * a valid vlan id.  If the vlan id has the untagged flag set,
+	 * send untagged; otherwise, send tagged.
+	 */
+	br_vlan_get_tag(skb, &vid);
+	v = br_vlan_find(vg, vid);
+	/* Vlan entry must be configured at this point.  The
 	 * only exception is the bridge is set in promisc mode and the
 	 * packet is destined for the bridge device.  In this case
 	 * pass the packet as is.
 	 */
-	if (!pv) {
+	if (!v || !br_vlan_should_use(v)) {
 		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 			goto out;
 		} else {
@@ -210,13 +353,7 @@
 			return NULL;
 		}
 	}
-
-	/* At this point, we know that the frame was filtered and contains
-	 * a valid vlan id.  If the vlan id is set in the untagged bitmap,
-	 * send untagged; otherwise, send tagged.
-	 */
-	br_vlan_get_tag(skb, &vid);
-	if (test_bit(vid, pv->untagged_bitmap))
+	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 		skb->vlan_tci = 0;
 
 out:
@@ -224,29 +361,13 @@
 }
 
 /* Called under RCU */
-bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-			struct sk_buff *skb, u16 *vid)
+static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
+			      struct sk_buff *skb, u16 *vid)
 {
+	const struct net_bridge_vlan *v;
 	bool tagged;
-	__be16 proto;
-
-	/* If VLAN filtering is disabled on the bridge, all packets are
-	 * permitted.
-	 */
-	if (!br->vlan_enabled) {
-		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
-		return true;
-	}
-
-	/* If there are no vlan in the permitted list, all packets are
-	 * rejected.
-	 */
-	if (!v)
-		goto drop;
 
 	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
-	proto = br->vlan_proto;
-
 	/* If vlan tx offload is disabled on bridge device and frame was
 	 * sent from vlan device on the bridge device, it does not have
 	 * HW accelerated vlan tag.
@@ -281,7 +402,7 @@
 	}
 
 	if (!*vid) {
-		u16 pvid = br_get_pvid(v);
+		u16 pvid = br_get_pvid(vg);
 
 		/* Frame had a tag with VID 0 or did not have a tag.
 		 * See if pvid is set on this port.  That tells us which
@@ -309,29 +430,43 @@
 	}
 
 	/* Frame had a valid vlan tag.  See if vlan is allowed */
-	if (test_bit(*vid, v->vlan_bitmap))
+	v = br_vlan_find(vg, *vid);
+	if (v && br_vlan_should_use(v))
 		return true;
 drop:
 	kfree_skb(skb);
 	return false;
 }
 
+bool br_allowed_ingress(const struct net_bridge *br,
+			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
+			u16 *vid)
+{
+	/* If VLAN filtering is disabled on the bridge, all packets are
+	 * permitted.
+	 */
+	if (!br->vlan_enabled) {
+		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
+		return true;
+	}
+
+	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
+}
+
 /* Called under RCU. */
-bool br_allowed_egress(struct net_bridge *br,
-		       const struct net_port_vlans *v,
+bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 		       const struct sk_buff *skb)
 {
+	const struct net_bridge_vlan *v;
 	u16 vid;
 
 	/* If this packet was not filtered at input, let it pass */
 	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 		return true;
 
-	if (!v)
-		return false;
-
 	br_vlan_get_tag(skb, &vid);
-	if (test_bit(vid, v->vlan_bitmap))
+	v = br_vlan_find(vg, vid);
+	if (v && br_vlan_should_use(v))
 		return true;
 
 	return false;
@@ -340,29 +475,29 @@
 /* Called under RCU */
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 {
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge *br = p->br;
-	struct net_port_vlans *v;
 
 	/* If filtering was disabled at input, let it pass. */
 	if (!br->vlan_enabled)
 		return true;
 
-	v = rcu_dereference(p->vlan_info);
-	if (!v)
+	vg = nbp_vlan_group(p);
+	if (!vg || !vg->num_vlans)
 		return false;
 
 	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 		*vid = 0;
 
 	if (!*vid) {
-		*vid = br_get_pvid(v);
+		*vid = br_get_pvid(vg);
 		if (!*vid)
 			return false;
 
 		return true;
 	}
 
-	if (test_bit(*vid, v->vlan_bitmap))
+	if (br_vlan_find(vg, *vid))
 		return true;
 
 	return false;
@@ -373,31 +508,49 @@
  */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
-	struct net_port_vlans *pv = NULL;
-	int err;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *vlan;
+	int ret;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(br->vlan_info);
-	if (pv)
-		return __vlan_add(pv, vid, flags);
+	vg = br_vlan_group(br);
+	vlan = br_vlan_find(vg, vid);
+	if (vlan) {
+		if (!br_vlan_is_brentry(vlan)) {
+			/* Trying to change flags of non-existent bridge vlan */
+			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
+				return -EINVAL;
+			/* It was only kept for port vlans, now make it real */
+			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
+					    vlan->vid);
+			if (ret) {
+				br_err(br, "failed to insert local address into bridge forwarding table\n");
+				return ret;
+			}
+			atomic_inc(&vlan->refcnt);
+			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
+			vg->num_vlans++;
+		}
+		__vlan_add_flags(vlan, flags);
+		return 0;
+	}
 
-	/* Create port vlan infomration
-	 */
-	pv = kzalloc(sizeof(*pv), GFP_KERNEL);
-	if (!pv)
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
 		return -ENOMEM;
 
-	pv->parent.br = br;
-	err = __vlan_add(pv, vid, flags);
-	if (err)
-		goto out;
+	vlan->vid = vid;
+	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
+	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
+	vlan->br = br;
+	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
+		atomic_set(&vlan->refcnt, 1);
+	ret = __vlan_add(vlan, flags);
+	if (ret)
+		kfree(vlan);
 
-	rcu_assign_pointer(br->vlan_info, pv);
-	return 0;
-out:
-	kfree(pv);
-	return err;
+	return ret;
 }
 
 /* Must be protected by RTNL.
@@ -405,49 +558,41 @@
  */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan_group *vg;
+	struct net_bridge_vlan *v;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(br->vlan_info);
-	if (!pv)
-		return -EINVAL;
+	vg = br_vlan_group(br);
+	v = br_vlan_find(vg, vid);
+	if (!v || !br_vlan_is_brentry(v))
+		return -ENOENT;
 
 	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
+	br_fdb_delete_by_port(br, NULL, vid, 0);
 
-	__vlan_del(pv, vid);
-	return 0;
+	return __vlan_del(v);
 }
 
 void br_vlan_flush(struct net_bridge *br)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan_group *vg;
 
 	ASSERT_RTNL();
-	pv = rtnl_dereference(br->vlan_info);
-	if (!pv)
-		return;
 
-	__vlan_flush(pv);
+	vg = br_vlan_group(br);
+	__vlan_flush(vg);
+	RCU_INIT_POINTER(br->vlgrp, NULL);
+	synchronize_rcu();
+	__vlan_group_free(vg);
 }
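
After the entries are flushed, teardown here (and in nbp_vlan_flush() below) follows the standard RCU retire sequence, which is why readers must go through the *_rcu accessors:

	/* 1. unpublish:  RCU_INIT_POINTER(br->vlgrp, NULL)
	 * 2. wait:       synchronize_rcu() lets in-flight readers drain
	 * 3. free:       __vlan_group_free(vg) is now safe
	 */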
 
-bool br_vlan_find(struct net_bridge *br, u16 vid)
+struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	struct net_port_vlans *pv;
-	bool found = false;
+	if (!vg)
+		return NULL;
 
-	rcu_read_lock();
-	pv = rcu_dereference(br->vlan_info);
-
-	if (!pv)
-		goto out;
-
-	if (test_bit(vid, pv->vlan_bitmap))
-		found = true;
-
-out:
-	rcu_read_unlock();
-	return found;
+	return br_vlan_lookup(&vg->vlan_hash, vid);
 }
 
 /* Must be protected by RTNL. */
@@ -505,21 +650,18 @@
 {
 	int err = 0;
 	struct net_bridge_port *p;
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *vlan;
+	struct net_bridge_vlan_group *vg;
 	__be16 oldproto;
-	u16 vid, errvid;
 
 	if (br->vlan_proto == proto)
 		return 0;
 
 	/* Add VLANs for the new proto to the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
-			err = vlan_vid_add(p->dev, proto, vid);
+		vg = nbp_vlan_group(p);
+		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
+			err = vlan_vid_add(p->dev, proto, vlan->vid);
 			if (err)
 				goto err_filt;
 		}
@@ -533,28 +675,21 @@
 
 	/* Delete VLANs for the old proto from the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-			vlan_vid_del(p->dev, oldproto, vid);
+		vg = nbp_vlan_group(p);
+		list_for_each_entry(vlan, &vg->vlan_list, vlist)
+			vlan_vid_del(p->dev, oldproto, vlan->vid);
 	}
 
 	return 0;
 
 err_filt:
-	errvid = vid;
-	for_each_set_bit(vid, pv->vlan_bitmap, errvid)
-		vlan_vid_del(p->dev, proto, vid);
+	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
+		vlan_vid_del(p->dev, proto, vlan->vid);
 
 	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
-		pv = rtnl_dereference(p->vlan_info);
-		if (!pv)
-			continue;
-
-		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-			vlan_vid_del(p->dev, proto, vid);
+		vg = nbp_vlan_group(p);
+		list_for_each_entry(vlan, &vg->vlan_list, vlist)
+			vlan_vid_del(p->dev, proto, vlan->vid);
 	}
 
 	return err;
@@ -576,9 +711,19 @@
 	return err;
 }
 
-static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
+static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 {
-	return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap);
+	struct net_bridge_vlan *v;
+
+	if (vid != vg->pvid)
+		return false;
+
+	v = br_vlan_lookup(&vg->vlan_hash, vid);
+	if (v && br_vlan_should_use(v) &&
+	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
+		return true;
+
+	return false;
 }
 
 static void br_vlan_disable_default_pvid(struct net_bridge *br)
@@ -589,24 +734,31 @@
 	/* Disable default_pvid on all ports where it is still
 	 * configured.
 	 */
-	if (vlan_default_pvid(br_get_vlan_info(br), pvid))
+	if (vlan_default_pvid(br_vlan_group(br), pvid))
 		br_vlan_delete(br, pvid);
 
 	list_for_each_entry(p, &br->port_list, list) {
-		if (vlan_default_pvid(nbp_get_vlan_info(p), pvid))
+		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 			nbp_vlan_delete(p, pvid);
 	}
 
 	br->default_pvid = 0;
 }
 
-static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 {
+	const struct net_bridge_vlan *pvent;
+	struct net_bridge_vlan_group *vg;
 	struct net_bridge_port *p;
 	u16 old_pvid;
 	int err = 0;
 	unsigned long *changed;
 
+	if (!pvid) {
+		br_vlan_disable_default_pvid(br);
+		return 0;
+	}
+
 	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 			  GFP_KERNEL);
 	if (!changed)
@@ -617,11 +769,14 @@
 	/* Update default_pvid config only if we do not conflict with
 	 * user configuration.
 	 */
-	if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) &&
-	    !br_vlan_find(br, pvid)) {
+	vg = br_vlan_group(br);
+	pvent = br_vlan_find(vg, pvid);
+	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
+	    (!pvent || !br_vlan_should_use(pvent))) {
 		err = br_vlan_add(br, pvid,
 				  BRIDGE_VLAN_INFO_PVID |
-				  BRIDGE_VLAN_INFO_UNTAGGED);
+				  BRIDGE_VLAN_INFO_UNTAGGED |
+				  BRIDGE_VLAN_INFO_BRENTRY);
 		if (err)
 			goto out;
 		br_vlan_delete(br, old_pvid);
@@ -632,9 +787,10 @@
 		/* Update default_pvid config only if we do not conflict with
 		 * user configuration.
 		 */
+		vg = nbp_vlan_group(p);
 		if ((old_pvid &&
-		     !vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) ||
-		    nbp_vlan_find(p, pvid))
+		     !vlan_default_pvid(vg, old_pvid)) ||
+		    br_vlan_find(vg, pvid))
 			continue;
 
 		err = nbp_vlan_add(p, pvid,
@@ -668,7 +824,8 @@
 		if (old_pvid)
 			br_vlan_add(br, old_pvid,
 				    BRIDGE_VLAN_INFO_PVID |
-				    BRIDGE_VLAN_INFO_UNTAGGED);
+				    BRIDGE_VLAN_INFO_UNTAGGED |
+				    BRIDGE_VLAN_INFO_BRENTRY);
 		br_vlan_delete(br, pvid);
 	}
 	goto out;
@@ -694,12 +851,7 @@
 		err = -EPERM;
 		goto unlock;
 	}
-
-	if (!pvid)
-		br_vlan_disable_default_pvid(br);
-	else
-		err = __br_vlan_set_default_pvid(br, pvid);
-
+	err = __br_vlan_set_default_pvid(br, pvid);
 unlock:
 	rtnl_unlock();
 	return err;
@@ -707,10 +859,66 @@
 
 int br_vlan_init(struct net_bridge *br)
 {
+	struct net_bridge_vlan_group *vg;
+	int ret = -ENOMEM;
+
+	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+	if (!vg)
+		goto out;
+	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
+	if (ret)
+		goto err_rhtbl;
+	INIT_LIST_HEAD(&vg->vlan_list);
 	br->vlan_proto = htons(ETH_P_8021Q);
 	br->default_pvid = 1;
-	return br_vlan_add(br, 1,
-			   BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED);
+	rcu_assign_pointer(br->vlgrp, vg);
+	ret = br_vlan_add(br, 1,
+			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
+			  BRIDGE_VLAN_INFO_BRENTRY);
+	if (ret)
+		goto err_vlan_add;
+
+out:
+	return ret;
+
+err_vlan_add:
+	rhashtable_destroy(&vg->vlan_hash);
+err_rhtbl:
+	kfree(vg);
+
+	goto out;
+}
+
+int nbp_vlan_init(struct net_bridge_port *p)
+{
+	struct net_bridge_vlan_group *vg;
+	int ret = -ENOMEM;
+
+	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
+	if (!vg)
+		goto out;
+
+	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
+	if (ret)
+		goto err_rhtbl;
+	INIT_LIST_HEAD(&vg->vlan_list);
+	rcu_assign_pointer(p->vlgrp, vg);
+	if (p->br->default_pvid) {
+		ret = nbp_vlan_add(p, p->br->default_pvid,
+				   BRIDGE_VLAN_INFO_PVID |
+				   BRIDGE_VLAN_INFO_UNTAGGED);
+		if (ret)
+			goto err_vlan_add;
+	}
+out:
+	return ret;
+
+err_vlan_add:
+	rhashtable_destroy(&vg->vlan_hash);
+err_rhtbl:
+	kfree(vg);
+
+	goto out;
 }
 
 /* Must be protected by RTNL.
@@ -718,35 +926,28 @@
  */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
-	struct net_port_vlans *pv = NULL;
-	int err;
+	struct net_bridge_vlan *vlan;
+	int ret;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (pv)
-		return __vlan_add(pv, vid, flags);
-
-	/* Create port vlan infomration
-	 */
-	pv = kzalloc(sizeof(*pv), GFP_KERNEL);
-	if (!pv) {
-		err = -ENOMEM;
-		goto clean_up;
+	vlan = br_vlan_find(nbp_vlan_group(port), vid);
+	if (vlan) {
+		__vlan_add_flags(vlan, flags);
+		return 0;
 	}
 
-	pv->port_idx = port->port_no;
-	pv->parent.port = port;
-	err = __vlan_add(pv, vid, flags);
-	if (err)
-		goto clean_up;
+	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
 
-	rcu_assign_pointer(port->vlan_info, pv);
-	return 0;
+	vlan->vid = vid;
+	vlan->port = port;
+	ret = __vlan_add(vlan, flags);
+	if (ret)
+		kfree(vlan);
 
-clean_up:
-	kfree(pv);
-	return err;
+	return ret;
 }
 
 /* Must be protected by RTNL.
@@ -754,61 +955,28 @@
  */
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 {
-	struct net_port_vlans *pv;
+	struct net_bridge_vlan *v;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (!pv)
-		return -EINVAL;
-
+	v = br_vlan_find(nbp_vlan_group(port), vid);
+	if (!v)
+		return -ENOENT;
 	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
 	br_fdb_delete_by_port(port->br, port, vid, 0);
 
-	return __vlan_del(pv, vid);
+	return __vlan_del(v);
 }
 
 void nbp_vlan_flush(struct net_bridge_port *port)
 {
-	struct net_port_vlans *pv;
-	u16 vid;
+	struct net_bridge_vlan_group *vg;
 
 	ASSERT_RTNL();
 
-	pv = rtnl_dereference(port->vlan_info);
-	if (!pv)
-		return;
-
-	for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
-		vlan_vid_del(port->dev, port->br->vlan_proto, vid);
-
-	__vlan_flush(pv);
-}
-
-bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
-{
-	struct net_port_vlans *pv;
-	bool found = false;
-
-	rcu_read_lock();
-	pv = rcu_dereference(port->vlan_info);
-
-	if (!pv)
-		goto out;
-
-	if (test_bit(vid, pv->vlan_bitmap))
-		found = true;
-
-out:
-	rcu_read_unlock();
-	return found;
-}
-
-int nbp_vlan_init(struct net_bridge_port *p)
-{
-	return p->br->default_pvid ?
-			nbp_vlan_add(p, p->br->default_pvid,
-				     BRIDGE_VLAN_INFO_PVID |
-				     BRIDGE_VLAN_INFO_UNTAGGED) :
-			0;
+	vg = nbp_vlan_group(port);
+	__vlan_flush(vg);
+	RCU_INIT_POINTER(port->vlgrp, NULL);
+	synchronize_rcu();
+	__vlan_group_free(vg);
 }
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index f9242df..32eccd1 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -73,21 +73,18 @@
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
 	{
 		.hook		= ebt_in_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_LOCAL_IN,
 		.priority	= NF_BR_PRI_FILTER_BRIDGED,
 	},
 	{
 		.hook		= ebt_in_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_FORWARD,
 		.priority	= NF_BR_PRI_FILTER_BRIDGED,
 	},
 	{
 		.hook		= ebt_out_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_LOCAL_OUT,
 		.priority	= NF_BR_PRI_FILTER_OTHER,
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4bbefe0..ec55358 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -73,21 +73,18 @@
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
 	{
 		.hook		= ebt_nat_out,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_LOCAL_OUT,
 		.priority	= NF_BR_PRI_NAT_DST_OTHER,
 	},
 	{
 		.hook		= ebt_nat_out,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_POST_ROUTING,
 		.priority	= NF_BR_PRI_NAT_SRC,
 	},
 	{
 		.hook		= ebt_nat_in,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_BRIDGE,
 		.hooknum	= NF_BR_PRE_ROUTING,
 		.priority	= NF_BR_PRI_NAT_DST_BRIDGED,
diff --git a/net/can/bcm.c b/net/can/bcm.c
index a1ba687..6863310 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -96,7 +96,7 @@
 	canid_t can_id;
 	u32 flags;
 	unsigned long frames_abs, frames_filtered;
-	struct timeval ival1, ival2;
+	struct bcm_timeval ival1, ival2;
 	struct hrtimer timer, thrtimer;
 	struct tasklet_struct tsklet, thrtsklet;
 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
@@ -131,6 +131,11 @@
 	return (struct bcm_sock *)sk;
 }
 
+static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
+{
+	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
+}
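
struct bcm_timeval pins the user API to two plain longs instead of the kernel's struct timeval, and the helper maps it straight onto ktime. A worked example:

	/* a 5.25 second interval */
	struct bcm_timeval tv = { .tv_sec = 5, .tv_usec = 250000 };
	ktime_t kt = bcm_timeval_to_ktime(tv);	/* == ktime_set(5, 250000000) */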
+
 #define CFSIZ sizeof(struct can_frame)
 #define OPSIZ sizeof(struct bcm_op)
 #define MHSIZ sizeof(struct bcm_msg_head)
@@ -953,8 +958,8 @@
 		op->count = msg_head->count;
 		op->ival1 = msg_head->ival1;
 		op->ival2 = msg_head->ival2;
-		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
-		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
+		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
 		/* disable an active timer due to zero values? */
 		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
@@ -1134,8 +1139,8 @@
 			/* set timer value */
 			op->ival1 = msg_head->ival1;
 			op->ival2 = msg_head->ival2;
-			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
-			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
+			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
+			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);
 
 			/* disable an active timer due to zero value? */
 			if (!op->kt_ival1.tv64)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 80b94e3..f79ccac 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -285,6 +285,7 @@
 	switch (op->op) {
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
+	case CEPH_OSD_OP_WRITEFULL:
 		ceph_osd_data_release(&op->extent.osd_data);
 		break;
 	case CEPH_OSD_OP_CALL:
@@ -485,13 +486,14 @@
 	size_t payload_len = 0;
 
 	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
-	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE);
+	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
+	       opcode != CEPH_OSD_OP_TRUNCATE);
 
 	op->extent.offset = offset;
 	op->extent.length = length;
 	op->extent.truncate_size = truncate_size;
 	op->extent.truncate_seq = truncate_seq;
-	if (opcode == CEPH_OSD_OP_WRITE)
+	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
 		payload_len += length;
 
 	op->payload_len = payload_len;
@@ -670,9 +672,11 @@
 		break;
 	case CEPH_OSD_OP_READ:
 	case CEPH_OSD_OP_WRITE:
+	case CEPH_OSD_OP_WRITEFULL:
 	case CEPH_OSD_OP_ZERO:
 	case CEPH_OSD_OP_TRUNCATE:
-		if (src->op == CEPH_OSD_OP_WRITE)
+		if (src->op == CEPH_OSD_OP_WRITE ||
+		    src->op == CEPH_OSD_OP_WRITEFULL)
 			request_data_len = src->extent.length;
 		dst->extent.offset = cpu_to_le64(src->extent.offset);
 		dst->extent.length = cpu_to_le64(src->extent.length);
@@ -681,7 +685,8 @@
 		dst->extent.truncate_seq =
 			cpu_to_le32(src->extent.truncate_seq);
 		osd_data = &src->extent.osd_data;
-		if (src->op == CEPH_OSD_OP_WRITE)
+		if (src->op == CEPH_OSD_OP_WRITE ||
+		    src->op == CEPH_OSD_OP_WRITEFULL)
 			ceph_osdc_msg_data_add(req->r_request, osd_data);
 		else
 			ceph_osdc_msg_data_add(req->r_reply, osd_data);
diff --git a/net/core/dev.c b/net/core/dev.c
index 323c04e..13f49f8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -99,6 +99,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/stat.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -682,6 +683,32 @@
 EXPORT_SYMBOL(dev_get_iflink);
 
 /**
+ *	dev_fill_metadata_dst - Retrieve tunnel egress information.
+ *	@dev: targeted interface
+ *	@skb: The packet.
+ *
+ *	For better visibility of tunnel traffic, OVS needs to retrieve
+ *	the egress tunnel information for a packet. The following API
+ *	allows the user to get this info.
+ */
+int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ip_tunnel_info *info;
+
+	if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
+		return -EINVAL;
+
+	info = skb_tunnel_info_unclone(skb);
+	if (!info)
+		return -ENOMEM;
+	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
+		return -EINVAL;
+
+	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
+}
+EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
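
A hypothetical caller sketch: hand the skb to the egress tunnel device, then read back the filled-in metadata. Per the body above, -EINVAL covers both a device without the ndo and an skb whose tunnel info is not in TX mode, and -ENOMEM means the unclone failed.

	struct ip_tunnel_info *info;
	int err;

	err = dev_fill_metadata_dst(tunnel_dev, skb);	/* tunnel_dev is assumed */
	if (err)
		return err;
	info = skb_tunnel_info(skb);			/* now holds the egress params */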
+
+/**
  *	__dev_get_by_name	- find a device by its name
  *	@net: the applicable net namespace
  *	@name: name to find
@@ -2974,6 +3001,7 @@
 			new_index = skb_tx_hash(dev, skb);
 
 		if (queue_index != new_index && sk &&
+		    sk_fullsock(sk) &&
 		    rcu_access_pointer(sk->sk_dst_cache))
 			sk_tx_queue_set(sk, new_index);
 
@@ -5345,6 +5373,12 @@
 	changeupper_info.master = master;
 	changeupper_info.linking = true;
 
+	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
+					    &changeupper_info.info);
+	ret = notifier_to_errno(ret);
+	if (ret)
+		return ret;
+
 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
 						   master);
 	if (ret)
@@ -5487,6 +5521,9 @@
 	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
 	changeupper_info.linking = false;
 
+	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
+				      &changeupper_info.info);
+
 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	/* Here is the tricky part. We must remove all dev's lower
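
NETDEV_PRECHANGEUPPER gives interested drivers a veto before the upper/lower link is committed; note that the return value is honored only on the link path above, while the unlink notification is advisory. A driver-side consumer might look like this (the offload check is hypothetical):

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct netdev_notifier_changeupper_info *info = ptr;

		if (event == NETDEV_PRECHANGEUPPER && info->linking &&
		    !my_can_offload_upper(info->upper_dev))	/* hypothetical */
			return notifier_from_errno(-EOPNOTSUPP);

		return NOTIFY_DONE;
	}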
diff --git a/net/core/dst.c b/net/core/dst.c
index 0771c8c..2a18180 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -144,12 +144,12 @@
 	mutex_unlock(&dst_gc_mutex);
 }
 
-int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
+int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	kfree_skb(skb);
 	return 0;
 }
-EXPORT_SYMBOL(dst_discard_sk);
+EXPORT_SYMBOL(dst_discard_out);
 
 const u32 dst_default_metrics[RTAX_MAX + 1] = {
 	/* This initializer is needed to force linker to place this variable
@@ -177,7 +177,7 @@
 	dst->xfrm = NULL;
 #endif
 	dst->input = dst_discard;
-	dst->output = dst_discard_sk;
+	dst->output = dst_discard_out;
 	dst->error = 0;
 	dst->obsolete = initial_obsolete;
 	dst->header_len = 0;
@@ -224,7 +224,7 @@
 	 */
 	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
 		dst->input = dst_discard;
-		dst->output = dst_discard_sk;
+		dst->output = dst_discard_out;
 	}
 	dst->obsolete = DST_OBSOLETE_DEAD;
 }
@@ -352,7 +352,7 @@
 	.family =		AF_UNSPEC,
 };
 
-static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
 	kfree_skb(skb);
@@ -375,7 +375,7 @@
 		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
 
 	dst->input = dst_md_discard;
-	dst->output = dst_md_discard_sk;
+	dst->output = dst_md_discard_out;
 
 	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
 }
@@ -430,7 +430,7 @@
 
 	if (!unregister) {
 		dst->input = dst_discard;
-		dst->output = dst_discard_sk;
+		dst->output = dst_discard_out;
 	} else {
 		dst->dev = dev_net(dst->dev)->loopback_dev;
 		dev_hold(dst->dev);
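
The rename from *_sk to *_out tracks a signature change: dst output handlers now take the struct net explicitly instead of deriving it later. A sketch of the new shape (the handler name is hypothetical):

	static int example_output(struct net *net, struct sock *sk,
				  struct sk_buff *skb)
	{
		/* real handlers transmit the skb or, as with
		 * dst_discard_out() above, drop it
		 */
		kfree_skb(skb);
		return 0;
	}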
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index b495ab1..29edf74 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1284,7 +1284,7 @@
 
 	gstrings.len = ret;
 
-	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+	data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
 	if (!data)
 		return -ENOMEM;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index 60e3fe7..672eefb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -49,16 +49,17 @@
 #include <net/sch_generic.h>
 #include <net/cls_cgroup.h>
 #include <net/dst_metadata.h>
+#include <net/dst.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
  *	@sk: sock associated with &sk_buff
  *	@skb: buffer to filter
  *
- * Run the filter code and then cut skb->data to correct size returned by
- * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
+ * Run the eBPF program and then cut skb->data to the correct size returned
+ * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
  * than pkt_len we keep whole skb->data. This is the socket level
- * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
+ * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
@@ -82,7 +83,7 @@
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
+		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
@@ -148,12 +149,6 @@
 	return raw_smp_processor_id();
 }
 
-/* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {
@@ -312,7 +307,8 @@
 			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
 			break;
 		case SKF_AD_OFF + SKF_AD_RANDOM:
-			*insn = BPF_EMIT_CALL(__get_random_u32);
+			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
+			bpf_user_rnd_init_once();
 			break;
 		}
 		break;
@@ -1001,7 +997,7 @@
 	int err;
 
 	fp->bpf_func = NULL;
-	fp->jited = false;
+	fp->jited = 0;
 
 	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
@@ -1083,16 +1079,18 @@
  *	@pfp: the unattached filter that is created
  *	@fprog: the filter program
  *	@trans: post-classic verifier transformation handler
+ *	@save_orig: save classic BPF program
  *
  * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from a user-space-provided buffer.
  * It also allows for passing a bpf_aux_classic_check_t handler.
  */
 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
-			      bpf_aux_classic_check_t trans)
+			      bpf_aux_classic_check_t trans, bool save_orig)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct bpf_prog *fp;
+	int err;
 
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
@@ -1108,12 +1106,16 @@
 	}
 
 	fp->len = fprog->len;
-	/* Since unattached filters are not copied back to user
-	 * space through sk_get_filter(), we do not need to hold
-	 * a copy here, and can spare us the work.
-	 */
 	fp->orig_prog = NULL;
 
+	if (save_orig) {
+		err = bpf_prog_store_orig_filter(fp, fprog);
+		if (err) {
+			__bpf_prog_free(fp);
+			return -ENOMEM;
+		}
+	}
+
 	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
@@ -1412,6 +1414,7 @@
 		return dev_forward_skb(dev, skb2);
 
 	skb2->dev = dev;
+	skb_sender_cpu_clear(skb2);
 	return dev_queue_xmit(skb2);
 }
 
@@ -1455,6 +1458,7 @@
 		return dev_forward_skb(dev, skb);
 
 	skb->dev = dev;
+	skb_sender_cpu_clear(skb);
 	return dev_queue_xmit(skb);
 }
 
@@ -1478,6 +1482,25 @@
 	.arg1_type      = ARG_PTR_TO_CTX,
 };
 
+static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	const struct dst_entry *dst;
+
+	dst = skb_dst((struct sk_buff *) (unsigned long) r1);
+	if (dst)
+		return dst->tclassid;
+#endif
+	return 0;
+}
+
+static const struct bpf_func_proto bpf_get_route_realm_proto = {
+	.func           = bpf_get_route_realm,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
 static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
@@ -1618,7 +1641,8 @@
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
 	case BPF_FUNC_trace_printk:
-		return bpf_get_trace_printk_proto();
+		if (capable(CAP_SYS_ADMIN))
+			return bpf_get_trace_printk_proto();
 	default:
 		return NULL;
 	}
@@ -1648,6 +1672,8 @@
 		return bpf_get_skb_set_tunnel_key_proto();
 	case BPF_FUNC_redirect:
 		return &bpf_redirect_proto;
+	case BPF_FUNC_get_route_realm:
+		return &bpf_get_route_realm_proto;
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -1699,6 +1725,7 @@
 		switch (off) {
 		case offsetof(struct __sk_buff, mark):
 		case offsetof(struct __sk_buff, tc_index):
+		case offsetof(struct __sk_buff, priority):
 		case offsetof(struct __sk_buff, cb[0]) ...
 			offsetof(struct __sk_buff, cb[4]):
 			break;
@@ -1711,7 +1738,8 @@
 
 static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 				      int src_reg, int ctx_off,
-				      struct bpf_insn *insn_buf)
+				      struct bpf_insn *insn_buf,
+				      struct bpf_prog *prog)
 {
 	struct bpf_insn *insn = insn_buf;
 
@@ -1740,8 +1768,12 @@
 	case offsetof(struct __sk_buff, priority):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
 
-		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
-				      offsetof(struct sk_buff, priority));
+		if (type == BPF_WRITE)
+			*insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
+		else
+			*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+					      offsetof(struct sk_buff, priority));
 		break;
 
 	case offsetof(struct __sk_buff, ingress_ifindex):
@@ -1798,6 +1830,7 @@
 		offsetof(struct __sk_buff, cb[4]):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
 
+		prog->cb_access = 1;
 		ctx_off -= offsetof(struct __sk_buff, cb[0]);
 		ctx_off += offsetof(struct sk_buff, cb);
 		ctx_off += offsetof(struct qdisc_skb_cb, data);
@@ -1909,9 +1942,13 @@
 		goto out;
 
 	/* We're copying the filter that has been originally attached,
-	 * so no conversion/decode needed anymore.
+	 * so no conversion/decode is needed anymore. eBPF programs that
+	 * have no original program cannot be dumped through this interface.
 	 */
+	ret = -EACCES;
 	fprog = filter->prog->orig_prog;
+	if (!fprog)
+		goto out;
 
 	ret = fprog->len;
 	if (!len)
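
Taken together, the filter.c changes make skb->priority writable from tc
programs and expose the route realm through a new helper. A hypothetical
restricted-C classifier using both (the helper stub follows the samples/bpf
convention of casting the BPF_FUNC_* id; names and section layout are
illustrative only):

	#include <linux/bpf.h>

	static unsigned int (*bpf_get_route_realm)(void *ctx) =
		(void *) BPF_FUNC_get_route_realm;

	__attribute__((section("classifier"), used))
	int cls_realm(struct __sk_buff *skb)
	{
		skb->priority = 6;	/* a valid write after this change */

		/* realm doubles as the classid; 0 if no realm is set */
		return bpf_get_route_realm(skb);
	}
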
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index dfb1a9c..299cfc2 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -180,7 +180,7 @@
 }
 EXPORT_SYMBOL(lwtunnel_cmp_encap);
 
-int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	const struct lwtunnel_encap_ops *ops;
@@ -199,7 +199,7 @@
 	rcu_read_lock();
 	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
 	if (likely(ops && ops->output))
-		ret = ops->output(sk, skb);
+		ret = ops->output(net, sk, skb);
 	rcu_read_unlock();
 
 	if (ret == -EOPNOTSUPP)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 2b515ba..1aa8437 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2235,14 +2235,53 @@
 	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
 }
 
+static bool neigh_master_filtered(struct net_device *dev, int master_idx)
+{
+	struct net_device *master;
+
+	if (!master_idx)
+		return false;
+
+	master = netdev_master_upper_dev_get(dev);
+	if (!master || master->ifindex != master_idx)
+		return true;
+
+	return false;
+}
+
+static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
+{
+	if (filter_idx && dev->ifindex != filter_idx)
+		return true;
+
+	return false;
+}
+
 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 			    struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
+	const struct nlmsghdr *nlh = cb->nlh;
+	struct nlattr *tb[NDA_MAX + 1];
 	struct neighbour *n;
 	int rc, h, s_h = cb->args[1];
 	int idx, s_idx = idx = cb->args[2];
 	struct neigh_hash_table *nht;
+	int filter_master_idx = 0, filter_idx = 0;
+	unsigned int flags = NLM_F_MULTI;
+	int err;
+
+	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
+	if (!err) {
+		if (tb[NDA_IFINDEX])
+			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
+
+		if (tb[NDA_MASTER])
+			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
+
+		if (filter_idx || filter_master_idx)
+			flags |= NLM_F_DUMP_FILTERED;
+	}
 
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
@@ -2255,12 +2294,16 @@
 		     n = rcu_dereference_bh(n->next)) {
 			if (!net_eq(dev_net(n->dev), net))
 				continue;
+			if (neigh_ifindex_filtered(n->dev, filter_idx))
+				continue;
+			if (neigh_master_filtered(n->dev, filter_master_idx))
+				continue;
 			if (idx < s_idx)
 				goto next;
 			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
 					    cb->nlh->nlmsg_seq,
 					    RTM_NEWNEIGH,
-					    NLM_F_MULTI) < 0) {
+					    flags) < 0) {
 				rc = -1;
 				goto out;
 			}
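
On the userspace side, the new NDA_MASTER/NDA_IFINDEX filtering is requested
by appending the attribute to an RTM_GETNEIGH dump. A hedged sketch of
building such a request ("br0" and the omitted error handling are
placeholders):

	#include <linux/rtnetlink.h>
	#include <net/if.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>

	struct {
		struct nlmsghdr nlh;
		struct ndmsg ndm;
		char attrs[RTA_SPACE(sizeof(uint32_t))];
	} req;
	struct rtattr *rta;
	uint32_t master = if_nametoindex("br0");

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ndm));
	req.nlh.nlmsg_type = RTM_GETNEIGH;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ndm.ndm_family = AF_UNSPEC;

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = NDA_MASTER;
	rta->rta_len = RTA_LENGTH(sizeof(master));
	memcpy(RTA_DATA(rta), &master, sizeof(master));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
			    RTA_SPACE(sizeof(master));

	/* send(fd, &req, req.nlh.nlmsg_len, 0) on a NETLINK_ROUTE socket;
	 * replies carrying NLM_F_DUMP_FILTERED confirm the kernel filtered.
	 */
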
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b4c5300..f88a62a 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -31,7 +31,6 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -202,7 +201,7 @@
 	if (netif_running(netdev)) {
 		struct ethtool_cmd cmd;
 		if (!__ethtool_get_settings(netdev, &cmd))
-			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
 	}
 	rtnl_unlock();
 	return ret;
@@ -472,7 +471,7 @@
 
 	if (dev_isalive(netdev)) {
 		struct switchdev_attr attr = {
-			.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+			.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 			.flags = SWITCHDEV_F_NO_RECURSE,
 		};
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8bdada2..94acfc8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -140,7 +140,7 @@
  * case. Further, we test the poll_owner to avoid recursion on UP
  * systems where the lock doesn't exist.
  */
-static int poll_one_napi(struct napi_struct *napi, int budget)
+static void poll_one_napi(struct napi_struct *napi)
 {
 	int work = 0;
 
@@ -149,33 +149,33 @@
 	 * holding the napi->poll_lock.
 	 */
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-		return budget;
+		return;
 
 	/* If we set this bit but see that it has already been set,
 	 * that indicates that napi has been disabled and we need
 	 * to abort this operation
 	 */
 	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
-		goto out;
+		return;
 
-	work = napi->poll(napi, budget);
-	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
+	/* We explicitly pass the polling call a budget of 0 to
+	 * indicate that we are clearing the Tx path only.
+	 */
+	work = napi->poll(napi, 0);
+	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
 	trace_napi_poll(napi);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
-
-out:
-	return budget - work;
 }
 
-static void poll_napi(struct net_device *dev, int budget)
+static void poll_napi(struct net_device *dev)
 {
 	struct napi_struct *napi;
 
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			budget = poll_one_napi(napi, budget);
+			poll_one_napi(napi);
 			spin_unlock(&napi->poll_lock);
 		}
 	}
@@ -185,7 +185,6 @@
 {
 	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
-	int budget = 0;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
@@ -208,7 +207,7 @@
 	/* Process pending work on NIC */
 	ops->ndo_poll_controller(dev);
 
-	poll_napi(dev, budget);
+	poll_napi(dev);
 
 	up(&ni->dev_lock);
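
netpoll now always invokes the driver's poll routine with a budget of 0,
meaning: clean the Tx ring, complete no Rx packets. A sketch of a driver poll
honouring that contract (the foo_ helpers are hypothetical):

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv,
						     napi);
		int work_done = 0;

		foo_clean_tx_ring(priv);	/* safe for any budget, incl. 0 */

		if (budget)			/* budget 0 == netpoll, Tx only */
			work_done = foo_clean_rx_ring(priv, budget);

		if (work_done < budget)
			napi_complete(napi);

		return work_done;		/* must stay 0 when budget is 0 */
	}
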
 
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index b42f0e2..5d26056 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -37,90 +37,16 @@
 int sysctl_max_syn_backlog = 256;
 EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
-int reqsk_queue_alloc(struct request_sock_queue *queue,
-		      unsigned int nr_table_entries)
+void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
-	size_t lopt_size = sizeof(struct listen_sock);
-	struct listen_sock *lopt = NULL;
+	spin_lock_init(&queue->rskq_lock);
 
-	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
-	nr_table_entries = max_t(u32, nr_table_entries, 8);
-	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
-	lopt_size += nr_table_entries * sizeof(struct request_sock *);
+	spin_lock_init(&queue->fastopenq.lock);
+	queue->fastopenq.rskq_rst_head = NULL;
+	queue->fastopenq.rskq_rst_tail = NULL;
+	queue->fastopenq.qlen = 0;
 
-	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		lopt = kzalloc(lopt_size, GFP_KERNEL |
-					  __GFP_NOWARN |
-					  __GFP_NORETRY);
-	if (!lopt)
-		lopt = vzalloc(lopt_size);
-	if (!lopt)
-		return -ENOMEM;
-
-	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-	spin_lock_init(&queue->syn_wait_lock);
 	queue->rskq_accept_head = NULL;
-	lopt->nr_table_entries = nr_table_entries;
-	lopt->max_qlen_log = ilog2(nr_table_entries);
-
-	spin_lock_bh(&queue->syn_wait_lock);
-	queue->listen_opt = lopt;
-	spin_unlock_bh(&queue->syn_wait_lock);
-
-	return 0;
-}
-
-void __reqsk_queue_destroy(struct request_sock_queue *queue)
-{
-	/* This is an error recovery path only, no locking needed */
-	kvfree(queue->listen_opt);
-}
-
-static inline struct listen_sock *reqsk_queue_yank_listen_sk(
-		struct request_sock_queue *queue)
-{
-	struct listen_sock *lopt;
-
-	spin_lock_bh(&queue->syn_wait_lock);
-	lopt = queue->listen_opt;
-	queue->listen_opt = NULL;
-	spin_unlock_bh(&queue->syn_wait_lock);
-
-	return lopt;
-}
-
-void reqsk_queue_destroy(struct request_sock_queue *queue)
-{
-	/* make all the listen_opt local to us */
-	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-
-	if (listen_sock_qlen(lopt) != 0) {
-		unsigned int i;
-
-		for (i = 0; i < lopt->nr_table_entries; i++) {
-			struct request_sock *req;
-
-			spin_lock_bh(&queue->syn_wait_lock);
-			while ((req = lopt->syn_table[i]) != NULL) {
-				lopt->syn_table[i] = req->dl_next;
-				/* Because of following del_timer_sync(),
-				 * we must release the spinlock here
-				 * or risk a dead lock.
-				 */
-				spin_unlock_bh(&queue->syn_wait_lock);
-				atomic_inc(&lopt->qlen_dec);
-				if (del_timer_sync(&req->rsk_timer))
-					reqsk_put(req);
-				reqsk_put(req);
-				spin_lock_bh(&queue->syn_wait_lock);
-			}
-			spin_unlock_bh(&queue->syn_wait_lock);
-		}
-	}
-
-	if (WARN_ON(listen_sock_qlen(lopt) != 0))
-		pr_err("qlen %u\n", listen_sock_qlen(lopt));
-	kvfree(lopt);
 }
 
 /*
@@ -174,7 +100,7 @@
 	struct sock *lsk = req->rsk_listener;
 	struct fastopen_queue *fastopenq;
 
-	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
+	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
 	tcp_sk(sk)->fastopen_rsk = NULL;
 	spin_lock_bh(&fastopenq->lock);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 474a6da..504bd17 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -96,7 +96,7 @@
 EXPORT_SYMBOL(rtnl_is_locked);
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_rtnl_is_held(void)
+bool lockdep_rtnl_is_held(void)
 {
 	return lockdep_is_held(&rtnl_mutex);
 }
@@ -497,7 +497,8 @@
 }
 EXPORT_SYMBOL_GPL(rtnl_af_unregister);
 
-static size_t rtnl_link_get_af_size(const struct net_device *dev)
+static size_t rtnl_link_get_af_size(const struct net_device *dev,
+				    u32 ext_filter_mask)
 {
 	struct rtnl_af_ops *af_ops;
 	size_t size;
@@ -509,7 +510,7 @@
 		if (af_ops->get_link_af_size) {
 			/* AF_* + nested data */
 			size += nla_total_size(sizeof(struct nlattr)) +
-				af_ops->get_link_af_size(dev);
+				af_ops->get_link_af_size(dev, ext_filter_mask);
 		}
 	}
 
@@ -837,7 +838,8 @@
 			 /* IFLA_VF_STATS_BROADCAST */
 			 nla_total_size(sizeof(__u64)) +
 			 /* IFLA_VF_STATS_MULTICAST */
-			 nla_total_size(sizeof(__u64)));
+			 nla_total_size(sizeof(__u64)) +
+			 nla_total_size(sizeof(struct ifla_vf_trust)));
 		return size;
 	} else
 		return 0;
@@ -900,7 +902,7 @@
 	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
-	       + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
 	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
@@ -1025,7 +1027,7 @@
 {
 	int err;
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 
@@ -1160,6 +1162,7 @@
 			struct ifla_vf_link_state vf_linkstate;
 			struct ifla_vf_rss_query_en vf_rss_query_en;
 			struct ifla_vf_stats vf_stats;
+			struct ifla_vf_trust vf_trust;
 
 			/*
 			 * Not all SR-IOV capable drivers support the
@@ -1169,6 +1172,7 @@
 			 */
 			ivi.spoofchk = -1;
 			ivi.rss_query_en = -1;
+			ivi.trusted = -1;
 			memset(ivi.mac, 0, sizeof(ivi.mac));
 			/* The default value for VF link state is "auto"
 			 * IFLA_VF_LINK_STATE_AUTO which equals zero
@@ -1182,7 +1186,8 @@
 				vf_tx_rate.vf =
 				vf_spoofchk.vf =
 				vf_linkstate.vf =
-				vf_rss_query_en.vf = ivi.vf;
+				vf_rss_query_en.vf =
+				vf_trust.vf = ivi.vf;
 
 			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
 			vf_vlan.vlan = ivi.vlan;
@@ -1193,6 +1198,7 @@
 			vf_spoofchk.setting = ivi.spoofchk;
 			vf_linkstate.link_state = ivi.linkstate;
 			vf_rss_query_en.setting = ivi.rss_query_en;
+			vf_trust.setting = ivi.trusted;
 			vf = nla_nest_start(skb, IFLA_VF_INFO);
 			if (!vf) {
 				nla_nest_cancel(skb, vfinfo);
@@ -1210,7 +1216,9 @@
 				    &vf_linkstate) ||
 			    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
 				    sizeof(vf_rss_query_en),
-				    &vf_rss_query_en))
+				    &vf_rss_query_en) ||
+			    nla_put(skb, IFLA_VF_TRUST,
+				    sizeof(vf_trust), &vf_trust))
 				goto nla_put_failure;
 			memset(&vf_stats, 0, sizeof(vf_stats));
 			if (dev->netdev_ops->ndo_get_vf_stats)
@@ -1347,6 +1355,7 @@
 	[IFLA_VF_LINK_STATE]	= { .len = sizeof(struct ifla_vf_link_state) },
 	[IFLA_VF_RSS_QUERY_EN]	= { .len = sizeof(struct ifla_vf_rss_query_en) },
 	[IFLA_VF_STATS]		= { .type = NLA_NESTED },
+	[IFLA_VF_TRUST]		= { .len = sizeof(struct ifla_vf_trust) },
 };
 
 static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
@@ -1586,6 +1595,16 @@
 			return err;
 	}
 
+	if (tb[IFLA_VF_TRUST]) {
+		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
+
+		err = -EOPNOTSUPP;
+		if (ops->ndo_set_vf_trust)
+			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
+		if (err < 0)
+			return err;
+	}
+
 	return err;
 }
 
@@ -3443,4 +3462,3 @@
 	rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
 	rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
 }
-
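
The new IFLA_VF_TRUST attribute is handed to drivers through an
ndo_set_vf_trust operation with the signature used in do_setvfinfo() above.
A hedged sketch of the driver side (the adapter layout is invented):

	static int foo_set_vf_trust(struct net_device *dev, int vf, bool setting)
	{
		struct foo_adapter *adapter = netdev_priv(dev);

		if (vf >= adapter->num_vfs)
			return -EINVAL;

		/* e.g. a trusted VF may enter promiscuous mode */
		adapter->vf_info[vf].trusted = setting;
		return 0;
	}

With matching iproute2 support this maps to something like
'ip link set <pf> vf <num> trust on|off'.
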
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index dad4dd3..fab4599 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2958,11 +2958,12 @@
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+	unsigned char *data = skb->data;
+
 	BUG_ON(len > skb->len);
-	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
-	skb_postpull_rcsum(skb, skb->data, len);
-	return skb->data += len;
+	__skb_pull(skb, len);
+	skb_postpull_rcsum(skb, data, len);
+	return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
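
The rewrite above is behaviourally equivalent but reuses __skb_pull(), which
already contains the data_len sanity check. Callers use skb_pull_rcsum() to
strip headers without invalidating CHECKSUM_COMPLETE; a minimal hedged sketch
for a hypothetical 8-byte encapsulation header:

	/* Linearize enough, then pull while folding the header out of
	 * skb->csum so CHECKSUM_COMPLETE stays valid.
	 */
	if (!pskb_may_pull(skb, 8))
		goto drop;
	skb_pull_rcsum(skb, 8);
	skb_reset_network_header(skb);
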
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 3307c02..0ef30aa 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -422,13 +422,25 @@
 	}
 }
 
+static bool sock_needs_netstamp(const struct sock *sk)
+{
+	switch (sk->sk_family) {
+	case AF_UNSPEC:
+	case AF_UNIX:
+		return false;
+	default:
+		return true;
+	}
+}
+
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
 
 static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 {
 	if (sk->sk_flags & flags) {
 		sk->sk_flags &= ~flags;
-		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
+		if (sock_needs_netstamp(sk) &&
+		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 			net_disable_timestamp();
 	}
 }
@@ -988,6 +1000,10 @@
 					 sk->sk_max_pacing_rate);
 		break;
 
+	case SO_INCOMING_CPU:
+		sk->sk_incoming_cpu = val;
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1578,7 +1594,8 @@
 		if (newsk->sk_prot->sockets_allocated)
 			sk_sockets_allocated_inc(newsk);
 
-		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+		if (sock_needs_netstamp(sk) &&
+		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
 			net_enable_timestamp();
 	}
 out:
@@ -1852,6 +1869,32 @@
 }
 EXPORT_SYMBOL(sock_alloc_send_skb);
 
+int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
+		   struct sockcm_cookie *sockc)
+{
+	struct cmsghdr *cmsg;
+
+	for_each_cmsghdr(cmsg, msg) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+		if (cmsg->cmsg_level != SOL_SOCKET)
+			continue;
+		switch (cmsg->cmsg_type) {
+		case SO_MARK:
+			if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+				return -EPERM;
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			sockc->mark = *(u32 *)CMSG_DATA(cmsg);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(sock_cmsg_send);
+
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
 
@@ -2353,6 +2396,7 @@
 
 	sk->sk_max_pacing_rate = ~0U;
 	sk->sk_pacing_rate = ~0U;
+	sk->sk_incoming_cpu = -1;
 	/*
 	 * Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.txt for details)
@@ -2479,7 +2523,8 @@
 		 * time stamping, but time stamping might have been on
 		 * already because of the other one
 		 */
-		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
+		if (sock_needs_netstamp(sk) &&
+		    !(previous_flags & SK_FLAGS_TIMESTAMP))
 			net_enable_timestamp();
 	}
 }
@@ -2758,7 +2803,7 @@
 
 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
 					   rsk_prot->obj_size, 0,
-					   0, NULL);
+					   prot->slab_flags, NULL);
 
 	if (!rsk_prot->slab) {
 		pr_crit("%s: Can't create request sock SLAB cache!\n",
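
sock_cmsg_send() gives privileged senders a per-packet mark without touching
the socket-wide SO_MARK option. A hedged userspace sketch (fd is a connected
datagram socket; the kernel rejects the cmsg without CAP_NET_ADMIN):

	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>

	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(uint32_t))];
	} u;
	struct msghdr msg = {
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
		/* .msg_iov/.msg_iovlen set up as usual */
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	uint32_t mark = 42;

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SO_MARK;	/* -EPERM without CAP_NET_ADMIN */
	cmsg->cmsg_len = CMSG_LEN(sizeof(mark));
	memcpy(CMSG_DATA(cmsg), &mark, sizeof(mark));

	sendmsg(fd, &msg, 0);

The same hunk set also makes SO_INCOMING_CPU writable, e.g.
setsockopt(fd, SOL_SOCKET, SO_INCOMING_CPU, &cpu, sizeof(cpu)).
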
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 817622f..0c1d58d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -1,3 +1,5 @@
+/* License: GPL */
+
 #include <linux/mutex.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
@@ -323,14 +325,4 @@
 	BUG_ON(!broadcast_wq);
 	return register_pernet_subsys(&diag_net_ops);
 }
-
-static void __exit sock_diag_exit(void)
-{
-	unregister_pernet_subsys(&diag_net_ops);
-	destroy_workqueue(broadcast_wq);
-}
-
-module_init(sock_diag_init);
-module_exit(sock_diag_exit);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
+device_initcall(sock_diag_init);
diff --git a/net/core/tso.c b/net/core/tso.c
index 630b30b..5dca7ce 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -1,4 +1,5 @@
 #include <linux/export.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/tso.h>
 #include <asm/unaligned.h>
@@ -14,18 +15,24 @@
 void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
 		   int size, bool is_last)
 {
-	struct iphdr *iph;
 	struct tcphdr *tcph;
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int mac_hdr_len = skb_network_offset(skb);
 
 	memcpy(hdr, skb->data, hdr_len);
-	iph = (struct iphdr *)(hdr + mac_hdr_len);
-	iph->id = htons(tso->ip_id);
-	iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+	if (!tso->ipv6) {
+		struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+		iph->id = htons(tso->ip_id);
+		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+		tso->ip_id++;
+	} else {
+		struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
+
+		iph->payload_len = htons(size + tcp_hdrlen(skb));
+	}
 	tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
 	put_unaligned_be32(tso->tcp_seq, &tcph->seq);
-	tso->ip_id++;
 
 	if (!is_last) {
 		/* Clear all special flags for not last packet */
@@ -61,6 +68,7 @@
 	tso->ip_id = ntohs(ip_hdr(skb)->id);
 	tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
 	tso->next_frag_idx = 0;
+	tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
 
 	/* Build first data */
 	tso->size = skb_headlen(skb) - hdr_len;
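
With the IPv6 branch in place a driver can use the net/tso helpers for both
address families. A hedged sketch of the usual segmentation loop (mtu, the
descriptor writes, and the header scratch size are placeholders; the pattern
follows existing users such as mvneta):

	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int left = skb->len - hdr_len;
	char hdr[256];

	tso_start(skb, &tso);
	while (left > 0) {
		int size = min_t(int, left, mtu - hdr_len);
		bool last = (size == left);

		tso_build_hdr(skb, hdr, &tso, size, last);
		/* ...queue 'hdr' (hdr_len bytes) to a descriptor... */

		while (size > 0) {
			int chunk = min_t(int, size, tso.size);

			/* ...queue tso.data ('chunk' bytes)... */
			tso_build_data(skb, &tso, chunk);
			size -= chunk;
			left -= chunk;
		}
	}
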
diff --git a/net/core/utils.c b/net/core/utils.c
index 3dffce9..3d17ca8 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,52 +348,3 @@
 	}
 }
 EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
-
-struct __net_random_once_work {
-	struct work_struct work;
-	struct static_key *key;
-};
-
-static void __net_random_once_deferred(struct work_struct *w)
-{
-	struct __net_random_once_work *work =
-		container_of(w, struct __net_random_once_work, work);
-	BUG_ON(!static_key_enabled(work->key));
-	static_key_slow_dec(work->key);
-	kfree(work);
-}
-
-static void __net_random_once_disable_jump(struct static_key *key)
-{
-	struct __net_random_once_work *w;
-
-	w = kmalloc(sizeof(*w), GFP_ATOMIC);
-	if (!w)
-		return;
-
-	INIT_WORK(&w->work, __net_random_once_deferred);
-	w->key = key;
-	schedule_work(&w->work);
-}
-
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *once_key)
-{
-	static DEFINE_SPINLOCK(lock);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lock, flags);
-	if (*done) {
-		spin_unlock_irqrestore(&lock, flags);
-		return false;
-	}
-
-	get_random_bytes(buf, nbytes);
-	*done = true;
-	spin_unlock_irqrestore(&lock, flags);
-
-	__net_random_once_disable_jump(once_key);
-
-	return true;
-}
-EXPORT_SYMBOL(__net_get_random_once);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 5b21f6f..4f6c186 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -13,6 +13,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program; if not, see <http://www.gnu.org/licenses/>.
  *
+ * Description: Data Center Bridging netlink interface
  * Author: Lucy Liu <lucy.liu@intel.com>
  */
 
@@ -24,7 +25,7 @@
 #include <linux/dcbnl.h>
 #include <net/dcbevent.h>
 #include <linux/rtnetlink.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <net/sock.h>
 
 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
@@ -48,10 +49,6 @@
  * features for capable devices.
  */
 
-MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
-MODULE_DESCRIPTION("Data Center Bridging netlink interface");
-MODULE_LICENSE("GPL");
-
 /**************** DCB attribute policies *************************************/
 
 /* DCB netlink attributes policy */
@@ -1935,19 +1932,6 @@
 }
 EXPORT_SYMBOL(dcb_ieee_delapp);
 
-static void dcb_flushapp(void)
-{
-	struct dcb_app_type *app;
-	struct dcb_app_type *tmp;
-
-	spin_lock_bh(&dcb_lock);
-	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
-		list_del(&app->list);
-		kfree(app);
-	}
-	spin_unlock_bh(&dcb_lock);
-}
-
 static int __init dcbnl_init(void)
 {
 	INIT_LIST_HEAD(&dcb_app_list);
@@ -1957,12 +1941,4 @@
 
 	return 0;
 }
-module_init(dcbnl_init);
-
-static void __exit dcbnl_exit(void)
-{
-	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
-	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
-	dcb_flushapp();
-}
-module_exit(dcbnl_exit);
+device_initcall(dcbnl_init);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 31e96df..b0e28d2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -229,7 +229,7 @@
 int dccp_retransmit_skb(struct sock *sk);
 
 void dccp_send_ack(struct sock *sk);
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk);
 
 void dccp_send_sync(struct sock *sk, const u64 seq,
@@ -270,15 +270,17 @@
 
 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb);
 
 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				       struct request_sock *req,
-				       struct dst_entry *dst);
+				       struct dst_entry *dst,
+				       struct request_sock *req_unhash,
+				       bool *own_req);
 struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
 			    struct request_sock *req);
 
@@ -325,13 +327,13 @@
 int dccp_invalid_packet(struct sk_buff *skb);
 u32 dccp_sample_rtt(struct sock *sk, long delta);
 
-static inline int dccp_bad_service_code(const struct sock *sk,
+static inline bool dccp_bad_service_code(const struct sock *sk,
 					const __be32 service)
 {
 	const struct dccp_sock *dp = dccp_sk(sk);
 
 	if (dp->dccps_service == service)
-		return 0;
+		return false;
 	return !dccp_list_has_service(dp->dccps_service_list, service);
 }
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a46ae9c..5684e14 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -208,7 +208,6 @@
 
 	if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-		reqsk_put(req);
 	} else {
 		/*
 		 * Still in RESPOND, just remove it silently.
@@ -218,6 +217,7 @@
 		 */
 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
 	}
+	reqsk_put(req);
 }
 EXPORT_SYMBOL(dccp_req_err);
 
@@ -390,9 +390,12 @@
  *
  * This is the equivalent of TCP's tcp_v4_syn_recv_sock
  */
-struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
+				       struct sk_buff *skb,
 				       struct request_sock *req,
-				       struct dst_entry *dst)
+				       struct dst_entry *dst,
+				       struct request_sock *req_unhash,
+				       bool *own_req)
 {
 	struct inet_request_sock *ireq;
 	struct inet_sock *newinet;
@@ -425,7 +428,7 @@
 
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
-	__inet_hash_nolisten(newsk, NULL);
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
 	return newsk;
 
@@ -443,36 +446,6 @@
 }
 EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
 
-static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
-{
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	const struct iphdr *iph = ip_hdr(skb);
-	struct sock *nsk;
-	/* Find possible connection requests. */
-	struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
-						       iph->saddr, iph->daddr);
-	if (req) {
-		nsk = dccp_check_req(sk, skb, req);
-		if (!nsk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
-				      iph->saddr, dh->dccph_sport,
-				      iph->daddr, dh->dccph_dport,
-				      inet_iif(skb));
-	if (nsk != NULL) {
-		if (nsk->sk_state != DCCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
-	return sk;
-}
-
 static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 					   struct sk_buff *skb)
 {
@@ -527,7 +500,7 @@
 	return err;
 }
 
-static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	int err;
 	const struct iphdr *rxiph;
@@ -624,7 +597,7 @@
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
 	if (req == NULL)
 		goto drop;
 
@@ -704,18 +677,6 @@
 	 * NOTE: the check for the packet types is done in
 	 *	 dccp_rcv_state_process
 	 */
-	if (sk->sk_state == DCCP_LISTEN) {
-		struct sock *nsk = dccp_v4_hnd_req(sk, skb);
-
-		if (nsk == NULL)
-			goto discard;
-
-		if (nsk != sk) {
-			if (dccp_child_process(sk, nsk, skb))
-				goto reset;
-			return 0;
-		}
-	}
 
 	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
 		goto reset;
@@ -723,7 +684,6 @@
 
 reset:
 	dccp_v4_ctl_send_reset(sk, skb);
-discard:
 	kfree_skb(skb);
 	return 0;
 }
@@ -841,15 +801,10 @@
 				  DCCP_SKB_CB(skb)->dccpd_ack_seq);
 	}
 
-	/* Step 2:
-	 *	Look up flow ID in table and get corresponding socket */
+lookup:
 	sk = __inet_lookup_skb(&dccp_hashinfo, skb,
 			       dh->dccph_sport, dh->dccph_dport);
-	/*
-	 * Step 2:
-	 *	If no socket ...
-	 */
-	if (sk == NULL) {
+	if (!sk) {
 		dccp_pr_debug("failed to look up flow ID in table and "
 			      "get corresponding socket\n");
 		goto no_dccp_socket;
@@ -867,6 +822,31 @@
 		goto no_dccp_socket;
 	}
 
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (likely(sk->sk_state == DCCP_LISTEN)) {
+			nsk = dccp_check_req(sk, skb, req);
+		} else {
+			inet_csk_reqsk_queue_drop_and_put(sk, req);
+			goto lookup;
+		}
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (dccp_child_process(sk, nsk, skb)) {
+			dccp_v4_ctl_send_reset(sk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	/*
 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4fa199d..ef4e48c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -234,7 +234,7 @@
 	kfree_skb(inet_rsk(req)->pktopts);
 }
 
-static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
+static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 {
 	const struct ipv6hdr *rxip6h;
 	struct sk_buff *skb;
@@ -290,37 +290,6 @@
 	.syn_ack_timeout = dccp_syn_ack_timeout,
 };
 
-static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
-{
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
-				   &iph->daddr, inet6_iif(skb));
-	if (req) {
-		nsk = dccp_check_req(sk, skb, req);
-		if (!nsk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
-					 &iph->saddr, dh->dccph_sport,
-					 &iph->daddr, ntohs(dh->dccph_dport),
-					 inet6_iif(skb));
-	if (nsk != NULL) {
-		if (nsk->sk_state != DCCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
-	return sk;
-}
-
 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct request_sock *req;
@@ -350,7 +319,7 @@
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
 		goto drop;
 
-	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
 	if (req == NULL)
 		goto drop;
 
@@ -398,7 +367,7 @@
 	if (dccp_v6_send_response(sk, req))
 		goto drop_and_free;
 
-	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
 	return 0;
 
 drop_and_free:
@@ -408,13 +377,16 @@
 	return -1;
 }
 
-static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct request_sock *req,
-					      struct dst_entry *dst)
+					      struct dst_entry *dst,
+					      struct request_sock *req_unhash,
+					      bool *own_req)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet_sock *newinet;
 	struct dccp6_sock *newdp6;
 	struct sock *newsk;
@@ -423,7 +395,8 @@
 		/*
 		 *	v6 mapped
 		 */
-		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
+		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
+						  req_unhash, own_req);
 		if (newsk == NULL)
 			return NULL;
 
@@ -462,22 +435,11 @@
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
 
-	if (dst == NULL) {
-		struct in6_addr *final_p, final;
+	if (!dst) {
 		struct flowi6 fl6;
 
-		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_proto = IPPROTO_DCCP;
-		fl6.daddr = ireq->ir_v6_rmt_addr;
-		final_p = fl6_update_dst(&fl6, np->opt, &final);
-		fl6.saddr = ireq->ir_v6_loc_addr;
-		fl6.flowi6_oif = sk->sk_bound_dev_if;
-		fl6.fl6_dport = ireq->ir_rmt_port;
-		fl6.fl6_sport = htons(ireq->ir_num);
-		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-		if (IS_ERR(dst))
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
+		if (!dst)
 			goto out;
 	}
 
@@ -552,7 +514,7 @@
 		dccp_done(newsk);
 		goto out;
 	}
-	__inet_hash(newsk, NULL);
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
 	return newsk;
 
@@ -651,24 +613,6 @@
 	 * NOTE: the check for the packet types is done in
 	 *	 dccp_rcv_state_process
 	 */
-	if (sk->sk_state == DCCP_LISTEN) {
-		struct sock *nsk = dccp_v6_hnd_req(sk, skb);
-
-		if (nsk == NULL)
-			goto discard;
-		/*
-		 * Queue it on the new socket if the new socket is active,
-		 * otherwise we just shortcircuit this and continue with
-		 * the new socket..
-		 */
-		if (nsk != sk) {
-			if (dccp_child_process(sk, nsk, skb))
-				goto reset;
-			if (opt_skb != NULL)
-				__kfree_skb(opt_skb);
-			return 0;
-		}
-	}
 
 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
 		goto reset;
@@ -715,16 +659,11 @@
 	else
 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
 
-	/* Step 2:
-	 *	Look up flow ID in table and get corresponding socket */
+lookup:
 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
 			        dh->dccph_sport, dh->dccph_dport,
 				inet6_iif(skb));
-	/*
-	 * Step 2:
-	 *	If no socket ...
-	 */
-	if (sk == NULL) {
+	if (!sk) {
 		dccp_pr_debug("failed to look up flow ID in table and "
 			      "get corresponding socket\n");
 		goto no_dccp_socket;
@@ -742,6 +681,31 @@
 		goto no_dccp_socket;
 	}
 
+	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (likely(sk->sk_state == DCCP_LISTEN)) {
+			nsk = dccp_check_req(sk, skb, req);
+		} else {
+			inet_csk_reqsk_queue_drop_and_put(sk, req);
+			goto lookup;
+		}
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (dccp_child_process(sk, nsk, skb)) {
+			dccp_v6_ctl_send_reset(sk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	/*
 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 838f524..1994f8a 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -72,7 +72,7 @@
 	dccp_done(sk);
 }
 
-struct sock *dccp_create_openreq_child(struct sock *sk,
+struct sock *dccp_create_openreq_child(const struct sock *sk,
 				       const struct request_sock *req,
 				       const struct sk_buff *skb)
 {
@@ -143,6 +143,7 @@
 {
 	struct sock *child = NULL;
 	struct dccp_request_sock *dreq = dccp_rsk(req);
+	bool own_req;
 
 	/* Check for retransmitted REQUEST */
 	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {
@@ -182,14 +183,13 @@
 	if (dccp_parse_options(sk, dreq, skb))
 		 goto drop;
 
-	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-	if (child == NULL)
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+							 req, &own_req);
+	if (!child)
 		goto listen_overflow;
 
-	inet_csk_reqsk_queue_drop(sk, req);
-	inet_csk_reqsk_queue_add(sk, req, child);
-out:
-	return child;
+	return inet_csk_complete_hashdance(sk, child, req, own_req);
+
 listen_overflow:
 	dccp_pr_debug("listen_overflow!\n");
 	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
@@ -198,7 +198,7 @@
 		req->rsk_ops->send_reset(sk, skb);
 
 	inet_csk_reqsk_queue_drop(sk, req);
-	goto out;
+	return NULL;
 }
 
 EXPORT_SYMBOL_GPL(dccp_check_req);
@@ -236,7 +236,7 @@
 
 EXPORT_SYMBOL_GPL(dccp_child_process);
 
-void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			 struct request_sock *rsk)
 {
 	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 4b02dd3..849805e 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -85,7 +85,7 @@
 	if (dst) {
 try_again:
 		skb_dst_set(skb, dst);
-		dst_output(skb->sk, skb);
+		dst_output(&init_net, skb->sk, skb);
 		return;
 	}
 
@@ -582,7 +582,7 @@
 	 * associations.
 	 */
 	skb_dst_set(skb, dst_clone(dst));
-	dst_output(skb->sk, skb);
+	dst_output(&init_net, skb->sk, skb);
 }
 
 
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index e930321..607a14f 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -744,7 +744,7 @@
 	return NET_RX_DROP;
 }
 
-static int dn_output(struct sock *sk, struct sk_buff *skb)
+static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
@@ -789,9 +789,7 @@
 	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
 	struct dn_route *rt;
 	int header_len;
-#ifdef CONFIG_NETFILTER
 	struct net_device *dev = skb->dev;
-#endif
 
 	if (skb->pkt_type != PACKET_HOST)
 		goto drop;
@@ -832,7 +830,7 @@
  * Used to catch bugs. This should never normally get
  * called.
  */
-static int dn_rt_bug_sk(struct sock *sk, struct sk_buff *skb)
+static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 
@@ -1469,7 +1467,7 @@
 
 	rt->n = neigh;
 	rt->dst.lastuse = jiffies;
-	rt->dst.output = dn_rt_bug_sk;
+	rt->dst.output = dn_rt_bug_out;
 	switch (res.type) {
 	case RTN_UNICAST:
 		rt->dst.input = dn_forward;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index c59fa5d..1eba07f 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -22,6 +22,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
 #include <linux/sysfs.h>
+#include <linux/phy_fixed.h>
 #include "dsa_priv.h"
 
 char dsa_driver_version[] = "0.1";
@@ -305,7 +306,7 @@
 	if (ret < 0)
 		goto out;
 
-	ds->slave_mii_bus = mdiobus_alloc();
+	ds->slave_mii_bus = devm_mdiobus_alloc(parent);
 	if (ds->slave_mii_bus == NULL) {
 		ret = -ENOMEM;
 		goto out;
@@ -314,7 +315,7 @@
 
 	ret = mdiobus_register(ds->slave_mii_bus);
 	if (ret < 0)
-		goto out_free;
+		goto out;
 
 
 	/*
@@ -326,8 +327,8 @@
 
 		ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
 		if (ret < 0) {
-			netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
-				   index, i, pd->port_names[i]);
+			netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
+				   index, i, pd->port_names[i], ret);
 			ret = 0;
 		}
 	}
@@ -367,10 +368,7 @@
 
 	return ret;
 
-out_free:
-	mdiobus_free(ds->slave_mii_bus);
 out:
-	kfree(ds);
 	return ret;
 }
 
@@ -400,7 +398,7 @@
 	/*
 	 * Allocate and initialise switch state.
 	 */
-	ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+	ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
 	if (ds == NULL)
 		return ERR_PTR(-ENOMEM);
 
@@ -420,10 +418,47 @@
 
 static void dsa_switch_destroy(struct dsa_switch *ds)
 {
+	struct device_node *port_dn;
+	struct phy_device *phydev;
+	struct dsa_chip_data *cd = ds->pd;
+	int port;
+
 #ifdef CONFIG_NET_DSA_HWMON
 	if (ds->hwmon_dev)
 		hwmon_device_unregister(ds->hwmon_dev);
 #endif
+
+	/* Disable configuration of the CPU and DSA ports */
+	for (port = 0; port < DSA_MAX_PORTS; port++) {
+		if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+			continue;
+
+		port_dn = cd->port_dn[port];
+		if (of_phy_is_fixed_link(port_dn)) {
+			phydev = of_phy_find_device(port_dn);
+			if (phydev) {
+				int addr = phydev->addr;
+
+				phy_device_free(phydev);
+				of_node_put(port_dn);
+				fixed_phy_del(addr);
+			}
+		}
+	}
+
+	/* Destroy network devices for physical switch ports. */
+	for (port = 0; port < DSA_MAX_PORTS; port++) {
+		if (!(ds->phys_port_mask & (1 << port)))
+			continue;
+
+		if (!ds->ports[port])
+			continue;
+
+		unregister_netdev(ds->ports[port]);
+		free_netdev(ds->ports[port]);
+	}
+
+	mdiobus_unregister(ds->slave_mii_bus);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -802,10 +837,11 @@
 }
 #endif
 
-static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
-			  struct device *parent, struct dsa_platform_data *pd)
+static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+			 struct device *parent, struct dsa_platform_data *pd)
 {
 	int i;
+	unsigned configured = 0;
 
 	dst->pd = pd;
 	dst->master_netdev = dev;
@@ -825,9 +861,17 @@
 		dst->ds[i] = ds;
 		if (ds->drv->poll_link != NULL)
 			dst->link_poll_needed = 1;
+
+		++configured;
 	}
 
 	/*
+	 * If no switch was found, exit cleanly
+	 */
+	if (!configured)
+		return -EPROBE_DEFER;
+
+	/*
 	 * If we use a tagging format that doesn't have an ethertype
 	 * field, make sure that all packets from this point on get
 	 * sent to the tag format's receive function.
@@ -843,6 +887,8 @@
 		dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
 		add_timer(&dst->link_poll_timer);
 	}
+
+	return 0;
 }
 
 static int dsa_probe(struct platform_device *pdev)
@@ -883,7 +929,7 @@
 		goto out;
 	}
 
-	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+	dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
 	if (dst == NULL) {
 		dev_put(dev);
 		ret = -ENOMEM;
@@ -892,7 +938,9 @@
 
 	platform_set_drvdata(pdev, dst);
 
-	dsa_setup_dst(dst, dev, &pdev->dev, pd);
+	ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
+	if (ret)
+		goto out;
 
 	return 0;
 
@@ -914,7 +962,7 @@
 	for (i = 0; i < dst->pd->nr_chips; i++) {
 		struct dsa_switch *ds = dst->ds[i];
 
-		if (ds != NULL)
+		if (ds)
 			dsa_switch_destroy(ds);
 	}
 }
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index f18cae5..481754e 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -242,10 +242,9 @@
 }
 
 static int dsa_slave_port_vlan_add(struct net_device *dev,
-				   struct switchdev_obj *obj,
+				   const struct switchdev_obj_port_vlan *vlan,
 				   struct switchdev_trans *trans)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	u16 vid;
@@ -279,9 +278,8 @@
 }
 
 static int dsa_slave_port_vlan_del(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   const struct switchdev_obj_port_vlan *vlan)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	u16 vid;
@@ -300,9 +298,9 @@
 }
 
 static int dsa_slave_port_vlan_dump(struct net_device *dev,
-				    struct switchdev_obj *obj)
+				    struct switchdev_obj_port_vlan *vlan,
+				    switchdev_obj_dump_cb_t *cb)
 {
-	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	DECLARE_BITMAP(members, DSA_MAX_PORTS);
@@ -334,7 +332,7 @@
 		if (test_bit(p->port, untagged))
 			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
 
-		err = obj->cb(dev, obj);
+		err = cb(&vlan->obj);
 		if (err)
 			break;
 	}
@@ -343,66 +341,48 @@
 }
 
 static int dsa_slave_port_fdb_add(struct net_device *dev,
-				  struct switchdev_obj *obj,
+				  const struct switchdev_obj_port_fdb *fdb,
 				  struct switchdev_trans *trans)
 {
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	int ret;
+
+	if (!ds->drv->port_fdb_prepare || !ds->drv->port_fdb_add)
+		return -EOPNOTSUPP;
 
 	if (switchdev_trans_ph_prepare(trans))
-		ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+		ret = ds->drv->port_fdb_prepare(ds, p->port, fdb, trans);
 	else
-		ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+		ret = ds->drv->port_fdb_add(ds, p->port, fdb, trans);
 
 	return ret;
 }
 
 static int dsa_slave_port_fdb_del(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj_port_fdb *fdb)
 {
-	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	int ret = -EOPNOTSUPP;
 
 	if (ds->drv->port_fdb_del)
-		ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+		ret = ds->drv->port_fdb_del(ds, p->port, fdb);
 
 	return ret;
 }
 
 static int dsa_slave_port_fdb_dump(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   struct switchdev_obj_port_fdb *fdb,
+				   switchdev_obj_dump_cb_t *cb)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	unsigned char addr[ETH_ALEN] = { 0 };
-	u16 vid = 0;
-	int ret;
 
-	if (!ds->drv->port_fdb_getnext)
-		return -EOPNOTSUPP;
+	if (ds->drv->port_fdb_dump)
+		return ds->drv->port_fdb_dump(ds, p->port, fdb, cb);
 
-	for (;;) {
-		bool is_static;
-
-		ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
-						&is_static);
-		if (ret < 0)
-			break;
-
-		obj->u.fdb.addr = addr;
-		obj->u.fdb.vid = vid;
-		obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
-
-		ret = obj->cb(dev, obj);
-		if (ret < 0)
-			break;
-	}
-
-	return ret == -ENOENT ? 0 : ret;
+	return -EOPNOTSUPP;
 }
 
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -453,15 +433,20 @@
 }
 
 static int dsa_slave_port_attr_set(struct net_device *dev,
-				   struct switchdev_attr *attr,
+				   const struct switchdev_attr *attr,
 				   struct switchdev_trans *trans)
 {
-	int ret = 0;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_STP_STATE:
-		if (switchdev_trans_ph_commit(trans))
-			ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		if (switchdev_trans_ph_prepare(trans))
+			ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
+		else
+			ret = ds->drv->port_stp_update(ds, p->port,
+						       attr->u.stp_state);
 		break;
 	default:
 		ret = -EOPNOTSUPP;
@@ -472,7 +457,7 @@
 }
 
 static int dsa_slave_port_obj_add(struct net_device *dev,
-				  struct switchdev_obj *obj,
+				  const struct switchdev_obj *obj,
 				  struct switchdev_trans *trans)
 {
 	int err;
@@ -483,11 +468,15 @@
 	 */
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_add(dev, obj, trans);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_add(dev,
+					     SWITCHDEV_OBJ_PORT_FDB(obj),
+					     trans);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_add(dev, obj, trans);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_add(dev,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj),
+					      trans);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -498,16 +487,18 @@
 }
 
 static int dsa_slave_port_obj_del(struct net_device *dev,
-				  struct switchdev_obj *obj)
+				  const struct switchdev_obj *obj)
 {
 	int err;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_del(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_del(dev,
+					     SWITCHDEV_OBJ_PORT_FDB(obj));
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_del(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_del(dev,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj));
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -518,16 +509,21 @@
 }
 
 static int dsa_slave_port_obj_dump(struct net_device *dev,
-				   struct switchdev_obj *obj)
+				   struct switchdev_obj *obj,
+				   switchdev_obj_dump_cb_t *cb)
 {
 	int err;
 
 	switch (obj->id) {
-	case SWITCHDEV_OBJ_PORT_FDB:
-		err = dsa_slave_port_fdb_dump(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = dsa_slave_port_fdb_dump(dev,
+					      SWITCHDEV_OBJ_PORT_FDB(obj),
+					      cb);
 		break;
-	case SWITCHDEV_OBJ_PORT_VLAN:
-		err = dsa_slave_port_vlan_dump(dev, obj);
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = dsa_slave_port_vlan_dump(dev,
+					       SWITCHDEV_OBJ_PORT_VLAN(obj),
+					       cb);
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -581,7 +577,7 @@
 	struct dsa_switch *ds = p->parent;
 
 	switch (attr->id) {
-	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
 		attr->u.ppid.id_len = sizeof(ds->index);
 		memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
 		break;
@@ -1013,8 +1009,10 @@
 	struct dsa_switch *ds = p->parent;
 
 	p->phy = ds->slave_mii_bus->phy_map[addr];
-	if (!p->phy)
+	if (!p->phy) {
+		netdev_err(slave_dev, "no phy at %d\n", addr);
 		return -ENODEV;
+	}
 
 	/* Use already configured phy mode */
 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
@@ -1048,7 +1046,7 @@
 		 */
 		ret = of_phy_register_fixed_link(port_dn);
 		if (ret) {
-			netdev_err(slave_dev, "failed to register fixed PHY\n");
+			netdev_err(slave_dev, "failed to register fixed PHY: %d\n", ret);
 			return ret;
 		}
 		phy_is_fixed = true;
@@ -1059,17 +1057,20 @@
 		phy_flags = ds->drv->get_phy_flags(ds, p->port);
 
 	if (phy_dn) {
-		ret = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+		int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
+
 		/* If this PHY address is part of phys_mii_mask, which means
 		 * that we need to divert reads and writes to/from it, then we
 		 * want to bind this device using the slave MII bus created by
 		 * DSA to make that happen.
 		 */
-		if (!phy_is_fixed && ret >= 0 &&
-		    (ds->phys_mii_mask & (1 << ret))) {
-			ret = dsa_slave_phy_connect(p, slave_dev, ret);
-			if (ret)
+		if (!phy_is_fixed && phy_id >= 0 &&
+		    (ds->phys_mii_mask & (1 << phy_id))) {
+			ret = dsa_slave_phy_connect(p, slave_dev, phy_id);
+			if (ret) {
+				netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret);
 				return ret;
+			}
 		} else {
 			p->phy = of_phy_connect(slave_dev, phy_dn,
 						dsa_slave_adjust_link,
@@ -1086,8 +1087,10 @@
 	 */
 	if (!p->phy) {
 		ret = dsa_slave_phy_connect(p, slave_dev, p->port);
-		if (ret)
+		if (ret) {
+			netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
 			return ret;
+		}
 	} else {
 		netdev_info(slave_dev, "attached PHY at address %d [%s]\n",
 			    p->phy->addr, p->phy->drv->name);
@@ -1199,6 +1202,7 @@
 
 	ret = dsa_slave_phy_setup(p, slave_dev);
 	if (ret) {
+		netdev_err(master, "error %d setting up slave phy\n", ret);
 		free_netdev(slave_dev);
 		return ret;
 	}
@@ -1252,7 +1256,7 @@
 			goto out;
 
 		err = dsa_slave_master_changed(dev);
-		if (err)
+		if (err && err != -EOPNOTSUPP)
 			netdev_warn(dev, "failed to reflect master change\n");
 
 		break;
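
The FDB path now follows switchdev's two-phase model: port_fdb_prepare may
fail, while port_fdb_add runs in the commit phase and is expected to succeed.
A hedged sketch of a driver pair (the foo_atu_* hardware helpers are
invented):

	static int foo_port_fdb_prepare(struct dsa_switch *ds, int port,
					const struct switchdev_obj_port_fdb *fdb,
					struct switchdev_trans *trans)
	{
		/* Reserve resources; this is the only place to say no. */
		return foo_atu_has_room(ds) ? 0 : -ENOSPC;
	}

	static int foo_port_fdb_add(struct dsa_switch *ds, int port,
				    const struct switchdev_obj_port_fdb *fdb,
				    struct switchdev_trans *trans)
	{
		/* Commit phase: program fdb->addr/fdb->vid, must not fail. */
		foo_atu_load(ds, port, fdb->addr, fdb->vid);
		return 0;
	}
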
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 9f0cfa5..20c49c7 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -101,14 +101,9 @@
 
 static void lowpan_setup(struct net_device *ldev)
 {
-	ldev->addr_len		= IEEE802154_ADDR_LEN;
 	memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-	ldev->type		= ARPHRD_6LOWPAN;
-	/* Frame Control + Sequence Number + Address fields + Security Header */
-	ldev->hard_header_len	= 2 + 1 + 20 + 14;
-	ldev->needed_tailroom	= 2; /* FCS */
-	ldev->mtu		= IPV6_MIN_MTU;
-	ldev->priv_flags	|= IFF_NO_QUEUE;
+	/* We need an ipv6hdr as the minimum length when calling xmit */
+	ldev->hard_header_len	= sizeof(struct ipv6hdr);
 	ldev->flags		= IFF_BROADCAST | IFF_MULTICAST;
 
 	ldev->netdev_ops	= &lowpan_netdev_ops;
@@ -156,6 +151,15 @@
 	lowpan_dev_info(ldev)->wdev = wdev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
 	memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
+	/* We need headroom for a possible wpan_dev_hard_header call and
+	 * tailroom for encryption/FCS handling. The lowpan interface will
+	 * replace the IPv6 header with a 6LoWPAN header; in the worst case
+	 * the 6LoWPAN header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than
+	 * the IPv6 header.
+	 */
+	ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN +
+				wdev->needed_headroom;
+	ldev->needed_tailroom = wdev->needed_tailroom;
 
 	lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
 
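
A note on the reserve arithmetic above: needed_headroom must cover the worst
case in which IPHC compression does not shrink the IPv6 header at all, plus
whatever MAC header and security overhead the underlying wpan device needs.
A hedged sketch of how an allocation against those budgets then stays
reallocation-free on the xmit path (netdev_alloc_skb() and IPV6_MIN_MTU are
the stock kernel ones; the usage is illustrative, not code from this patch):

	struct sk_buff *skb;

	skb = netdev_alloc_skb(ldev, ldev->needed_headroom + IPV6_MIN_MTU +
				     ldev->needed_tailroom);
	if (!skb)
		return NULL;
	/* Later skb_push() of the 6LoWPAN and MAC headers fits inside the
	 * reserved headroom, so the hot path never hits skb_cow().
	 */
	skb_reserve(skb, ldev->needed_headroom);
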
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index b1fd47d..ef185dd 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -29,6 +29,8 @@
 static int lowpan_give_skb_to_device(struct sk_buff *skb)
 {
 	skb->protocol = htons(ETH_P_IPV6);
+	skb->dev->stats.rx_packets++;
+	skb->dev->stats.rx_bytes += skb->len;
 
 	return netif_rx(skb);
 }
@@ -88,36 +90,12 @@
 
 int lowpan_iphc_decompress(struct sk_buff *skb)
 {
-	struct ieee802154_addr_sa sa, da;
 	struct ieee802154_hdr hdr;
-	u8 iphc0, iphc1;
-	void *sap, *dap;
 
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
 		return -EINVAL;
 
-	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-
-	if (lowpan_fetch_skb_u8(skb, &iphc0) ||
-	    lowpan_fetch_skb_u8(skb, &iphc1))
-		return -EINVAL;
-
-	ieee802154_addr_to_sa(&sa, &hdr.source);
-	ieee802154_addr_to_sa(&da, &hdr.dest);
-
-	if (sa.addr_type == IEEE802154_ADDR_SHORT)
-		sap = &sa.short_addr;
-	else
-		sap = &sa.hwaddr;
-
-	if (da.addr_type == IEEE802154_ADDR_SHORT)
-		dap = &da.short_addr;
-	else
-		dap = &da.hwaddr;
-
-	return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
-					IEEE802154_ADDR_LEN, dap, da.addr_type,
-					IEEE802154_ADDR_LEN, iphc0, iphc1);
+	return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
 }
 
 static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
@@ -306,16 +284,16 @@
 	if (wdev->type != ARPHRD_IEEE802154 ||
 	    skb->pkt_type == PACKET_OTHERHOST ||
 	    !lowpan_rx_h_check(skb))
-		return NET_RX_DROP;
+		goto drop;
 
 	ldev = wdev->ieee802154_ptr->lowpan_dev;
 	if (!ldev || !netif_running(ldev))
-		return NET_RX_DROP;
+		goto drop;
 
 	/* Replacing skb->dev and followed rx handlers will manipulate skb. */
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
-		return NET_RX_DROP;
+		goto out;
 	skb->dev = ldev;
 
 	/* When receive frag1 it's likely that we manipulate the buffer.
@@ -326,10 +304,15 @@
 	    lowpan_is_iphc(*skb_network_header(skb))) {
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (!skb)
-			return NET_RX_DROP;
+			goto out;
 	}
 
 	return lowpan_invoke_rx_handlers(skb);
+
+drop:
+	kfree_skb(skb);
+out:
+	return NET_RX_DROP;
 }
 
 static struct packet_type lowpan_packet_type = {
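
The split between the drop and out labels above is deliberate, not cosmetic:
skb_share_check() and skb_unshare() free the passed-in skb themselves when
their internal clone/copy fails, so jumping to drop from those two call sites
would be a double free. The convention in miniature (a sketch of the pattern,
not new code):

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;	/* already freed by skb_share_check() */

	if (frame_is_bad)
		goto drop;	/* we still own skb, free it ourselves */
	...
drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
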
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 54939d0..d4353fa 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -10,9 +10,13 @@
 
 #include <net/6lowpan.h>
 #include <net/ieee802154_netdev.h>
+#include <net/mac802154.h>
 
 #include "6lowpan_i.h"
 
+#define LOWPAN_FRAG1_HEAD_SIZE	0x4
+#define LOWPAN_FRAGN_HEAD_SIZE	0x5
+
 /* don't save pan id, it's intra pan */
 struct lowpan_addr {
 	u8 mode;
@@ -36,6 +40,13 @@
 			sizeof(struct lowpan_addr_info));
 }
 
+/* This callback will be called from the AF_PACKET and IPv6 stacks; note that
+ * AF_PACKET sockets give an 8 byte array for addresses only!
+ *
+ * TODO I think AF_PACKET DGRAM (sending/receiving) and RAW (sending) make no
+ * sense here. We should disable them; the right use-case would be AF_INET6
+ * RAW/DGRAM sockets.
+ */
 int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
 			 unsigned short type, const void *_daddr,
 			 const void *_saddr, unsigned int len)
@@ -71,27 +82,33 @@
 
 static struct sk_buff*
 lowpan_alloc_frag(struct sk_buff *skb, int size,
-		  const struct ieee802154_hdr *master_hdr)
+		  const struct ieee802154_hdr *master_hdr, bool frag1)
 {
 	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
 	struct sk_buff *frag;
 	int rc;
 
-	frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size,
+	frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size,
 			 GFP_ATOMIC);
 
 	if (likely(frag)) {
 		frag->dev = wdev;
 		frag->priority = skb->priority;
-		skb_reserve(frag, wdev->hard_header_len);
+		skb_reserve(frag, wdev->needed_headroom);
 		skb_reset_network_header(frag);
 		*mac_cb(frag) = *mac_cb(skb);
 
-		rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest,
-				     &master_hdr->source, size);
-		if (rc < 0) {
-			kfree_skb(frag);
-			return ERR_PTR(rc);
+		if (frag1) {
+			memcpy(skb_put(frag, skb->mac_len),
+			       skb_mac_header(skb), skb->mac_len);
+		} else {
+			rc = wpan_dev_hard_header(frag, wdev,
+						  &master_hdr->dest,
+						  &master_hdr->source, size);
+			if (rc < 0) {
+				kfree_skb(frag);
+				return ERR_PTR(rc);
+			}
 		}
 	} else {
 		frag = ERR_PTR(-ENOMEM);
@@ -103,13 +120,13 @@
 static int
 lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
 		     u8 *frag_hdr, int frag_hdrlen,
-		     int offset, int len)
+		     int offset, int len, bool frag1)
 {
 	struct sk_buff *frag;
 
 	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);
 
-	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
+	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1);
 	if (IS_ERR(frag))
 		return PTR_ERR(frag);
 
@@ -148,7 +165,8 @@
 
 	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
 				  LOWPAN_FRAG1_HEAD_SIZE, 0,
-				  frag_len + skb_network_header_len(skb));
+				  frag_len + skb_network_header_len(skb),
+				  true);
 	if (rc) {
 		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
 			 __func__, ntohs(frag_tag));
@@ -169,7 +187,7 @@
 
 		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
 					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
-					  frag_len);
+					  frag_len, false);
 		if (rc) {
 			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
 				 __func__, ntohs(frag_tag), skb_offset);
@@ -177,6 +195,8 @@
 		}
 	} while (skb_unprocessed > frag_cap);
 
+	ldev->stats.tx_packets++;
+	ldev->stats.tx_bytes += dgram_size;
 	consume_skb(skb);
 	return NET_XMIT_SUCCESS;
 
@@ -201,7 +221,7 @@
 	saddr = &info.saddr.u.extended_addr;
 
 	*dgram_size = skb->len;
-	lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
+	lowpan_header_compress(skb, ldev, daddr, saddr);
 	/* dgram_offset = (saved bytes after compression) + lowpan header len */
 	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
 
@@ -218,7 +238,7 @@
 	/* if the destination address is the broadcast address, use the
 	 * corresponding short address
 	 */
-	if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+	if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
 		da.mode = IEEE802154_ADDR_SHORT;
 		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
 		cb->ackreq = false;
@@ -228,8 +248,8 @@
 		cb->ackreq = wpan_dev->ackreq;
 	}
 
-	return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6,
-			       (void *)&da, (void *)&sa, 0);
+	return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa,
+				    0);
 }
 
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
@@ -240,6 +260,8 @@
 
 	pr_debug("package xmit\n");
 
+	WARN_ON_ONCE(skb->len > IPV6_MIN_MTU);
+
 	/* We must take a copy of the skb before we modify/replace the ipv6
 	 * header as the header could be used elsewhere
 	 */
@@ -262,6 +284,8 @@
 
 	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
 		skb->dev = lowpan_dev_info(ldev)->wdev;
+		ldev->stats.tx_packets++;
+		ldev->stats.tx_bytes += dgram_size;
 		return dev_queue_xmit(skb);
 	} else {
 		netdev_tx_t rc;
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1370d5b..188135b 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -12,6 +12,11 @@
 
 if IEEE802154
 
+config IEEE802154_NL802154_EXPERIMENTAL
+	bool "IEEE 802.15.4 experimental netlink support"
+	---help---
+	  Adds experimental netlink support for nl802154.
+
 config IEEE802154_SOCKET
 	tristate "IEEE 802.15.4 socket interface"
 	default y
diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c
index b0248e9..c35fdfa 100644
--- a/net/ieee802154/core.c
+++ b/net/ieee802154/core.c
@@ -95,6 +95,18 @@
 	return result;
 }
 
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx)
+{
+	struct cfg802154_registered_device *rdev;
+
+	ASSERT_RTNL();
+
+	rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx);
+	if (!rdev)
+		return NULL;
+	return &rdev->wpan_phy;
+}
+
 struct wpan_phy *
 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
 {
diff --git a/net/ieee802154/core.h b/net/ieee802154/core.h
index f3e9558..231fade 100644
--- a/net/ieee802154/core.h
+++ b/net/ieee802154/core.h
@@ -42,5 +42,6 @@
 void cfg802154_dev_free(struct cfg802154_registered_device *rdev);
 struct cfg802154_registered_device *
 cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
+struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);
 
 #endif /* __IEEE802154_CORE_H */
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
index a051b69..c7439f0 100644
--- a/net/ieee802154/header_ops.c
+++ b/net/ieee802154/header_ops.c
@@ -83,35 +83,35 @@
 }
 
 int
-ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr)
 {
-	u8 buf[MAC802154_FRAME_HARD_HEADER_LEN];
+	u8 buf[IEEE802154_MAX_HEADER_LEN];
 	int pos = 2;
 	int rc;
-	struct ieee802154_hdr_fc fc = hdr->fc;
+	struct ieee802154_hdr_fc *fc = &hdr->fc;
 
 	buf[pos++] = hdr->seq;
 
-	fc.dest_addr_mode = hdr->dest.mode;
+	fc->dest_addr_mode = hdr->dest.mode;
 
 	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
 	if (rc < 0)
 		return -EINVAL;
 	pos += rc;
 
-	fc.source_addr_mode = hdr->source.mode;
+	fc->source_addr_mode = hdr->source.mode;
 
 	if (hdr->source.pan_id == hdr->dest.pan_id &&
 	    hdr->dest.mode != IEEE802154_ADDR_NONE)
-		fc.intra_pan = true;
+		fc->intra_pan = true;
 
-	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan);
+	rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan);
 	if (rc < 0)
 		return -EINVAL;
 	pos += rc;
 
-	if (fc.security_enabled) {
-		fc.version = 1;
+	if (fc->security_enabled) {
+		fc->version = 1;
 
 		rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
 		if (rc < 0)
@@ -120,7 +120,7 @@
 		pos += rc;
 	}
 
-	memcpy(buf, &fc, 2);
+	memcpy(buf, fc, 2);
 
 	memcpy(skb_push(skb, pos), buf, pos);
 
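
Dropping the const from the hdr parameter is a functional change, not style:
ieee802154_hdr_push() now writes the address modes, intra_pan and version bits
into the caller's frame-control word instead of a local copy, so the caller
can inspect the frame control that was actually emitted. Roughly (a usage
sketch, not code from this patch):

	struct ieee802154_hdr hdr = { /* filled in by the caller */ };
	int hlen = ieee802154_hdr_push(skb, &hdr);

	if (hlen < 0)
		return hlen;
	/* hdr.fc now reflects what went on the air, e.g.: */
	if (hdr.fc.intra_pan)
		pr_debug("emitted an intra-PAN frame\n");
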
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 3f89c0a..16ef0d9 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -232,8 +232,86 @@
 	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
 
 	[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
+
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	[NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, },
+	[NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, },
+	[NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, },
+	[NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 },
+
+	[NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED },
+	[NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED },
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static int
+nl802154_prepare_wpan_dev_dump(struct sk_buff *skb,
+			       struct netlink_callback *cb,
+			       struct cfg802154_registered_device **rdev,
+			       struct wpan_dev **wpan_dev)
+{
+	int err;
+
+	rtnl_lock();
+
+	if (!cb->args[0]) {
+		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize,
+				  nl802154_fam.attrbuf, nl802154_fam.maxattr,
+				  nl802154_policy);
+		if (err)
+			goto out_unlock;
+
+		*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
+							    nl802154_fam.attrbuf);
+		if (IS_ERR(*wpan_dev)) {
+			err = PTR_ERR(*wpan_dev);
+			goto out_unlock;
+		}
+		*rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy);
+		/* 0 is the first index - add 1 to parse only once */
+		cb->args[0] = (*rdev)->wpan_phy_idx + 1;
+		cb->args[1] = (*wpan_dev)->identifier;
+	} else {
+		/* subtract the 1 again here */
+		struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
+		struct wpan_dev *tmp;
+
+		if (!wpan_phy) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+		*rdev = wpan_phy_to_rdev(wpan_phy);
+		*wpan_dev = NULL;
+
+		list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) {
+			if (tmp->identifier == cb->args[1]) {
+				*wpan_dev = tmp;
+				break;
+			}
+		}
+
+		if (!*wpan_dev) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+	}
+
+	return 0;
+ out_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static void
+nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev)
+{
+	rtnl_unlock();
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 /* message building helper */
 static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
 				    int flags, u8 cmd)
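
The +1/-1 dance around cb->args[0] above exists because netlink callback args
are zero-initialised and 0 has to mean "first invocation of this dump"; a
wpan_phy index of 0 is perfectly valid, so it cannot be stored as-is. In
short:

	/* store: phy index 0 becomes 1, so args[0] == 0 stays unambiguous */
	cb->args[0] = (*rdev)->wpan_phy_idx + 1;
	/* load on the next dump pass: undo the shift */
	wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1);
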
@@ -612,6 +690,107 @@
 	       ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32);
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+#include <net/ieee802154_netdev.h>
+
+static int
+ieee802154_llsec_send_key_id(struct sk_buff *msg,
+			     const struct ieee802154_llsec_key_id *desc)
+{
+	struct nlattr *nl_dev_addr;
+
+	if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode))
+		return -ENOBUFS;
+
+	switch (desc->mode) {
+	case NL802154_KEY_ID_MODE_IMPLICIT:
+		nl_dev_addr = nla_nest_start(msg, NL802154_KEY_ID_ATTR_IMPLICIT);
+		if (!nl_dev_addr)
+			return -ENOBUFS;
+
+		if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID,
+				 desc->device_addr.pan_id) ||
+		    nla_put_u32(msg,  NL802154_DEV_ADDR_ATTR_MODE,
+				desc->device_addr.mode))
+			return -ENOBUFS;
+
+		switch (desc->device_addr.mode) {
+		case NL802154_DEV_ADDR_SHORT:
+			if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT,
+					 desc->device_addr.short_addr))
+				return -ENOBUFS;
+			break;
+		case NL802154_DEV_ADDR_EXTENDED:
+			if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED,
+					 desc->device_addr.extended_addr))
+				return -ENOBUFS;
+			break;
+		default:
+			/* userspace should handle unknown */
+			break;
+		}
+
+		nla_nest_end(msg, nl_dev_addr);
+		break;
+	case NL802154_KEY_ID_MODE_INDEX:
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_SHORT:
+		/* TODO rename short_source? */
+		if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT,
+				 desc->short_source))
+			return -ENOBUFS;
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+		if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED,
+				 desc->extended_source))
+			return -ENOBUFS;
+		break;
+	default:
+		/* userspace should handle unknown */
+		break;
+	}
+
+	/* TODO key_id to key_idx ? Check naming */
+	if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+		if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id))
+			return -ENOBUFS;
+	}
+
+	return 0;
+}
+
+static int nl802154_get_llsec_params(struct sk_buff *msg,
+				     struct cfg802154_registered_device *rdev,
+				     struct wpan_dev *wpan_dev)
+{
+	struct nlattr *nl_key_id;
+	struct ieee802154_llsec_params params;
+	int ret;
+
+	ret = rdev_get_llsec_params(rdev, wpan_dev, &params);
+	if (ret < 0)
+		return ret;
+
+	if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) ||
+	    nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) ||
+	    nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER,
+			 params.frame_counter))
+		return -ENOBUFS;
+
+	nl_key_id = nla_nest_start(msg, NL802154_ATTR_SEC_OUT_KEY_ID);
+	if (!nl_key_id)
+		return -ENOBUFS;
+
+	ret = ieee802154_llsec_send_key_id(msg, &params.out_key);
+	if (ret < 0)
+		return ret;
+
+	nla_nest_end(msg, nl_key_id);
+
+	return 0;
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 static int
 nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 		    struct cfg802154_registered_device *rdev,
@@ -663,6 +842,11 @@
 	if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
 		goto nla_put_failure;
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
+		goto nla_put_failure;
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -753,10 +937,8 @@
 			return -EINVAL;
 	}
 
-	/* TODO add nla_get_le64 to netlink */
 	if (info->attrs[NL802154_ATTR_EXTENDED_ADDR])
-		extended_addr = (__force __le64)nla_get_u64(
-				info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
+		extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]);
 
 	if (!rdev->ops->add_virtual_intf)
 		return -EOPNOTSUPP;
@@ -1075,6 +1257,838 @@
 	return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = {
+	[NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 },
+	[NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 },
+	[NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 },
+	[NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
+				struct ieee802154_addr *addr)
+{
+	struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla,
+				     nl802154_dev_addr_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
+	    !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+	    !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
+	      attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+		return -EINVAL;
+
+	addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
+	addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
+	switch (addr->mode) {
+	case NL802154_DEV_ADDR_SHORT:
+		addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
+		break;
+	case NL802154_DEV_ADDR_EXTENDED:
+		addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = {
+	[NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 },
+	[NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 },
+	[NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED },
+	[NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 },
+	[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 },
+};
+
+static int
+ieee802154_llsec_parse_key_id(struct nlattr *nla,
+			      struct ieee802154_llsec_key_id *desc)
+{
+	struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla,
+				     nl802154_key_id_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_KEY_ID_ATTR_MODE])
+		return -EINVAL;
+
+	desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]);
+	switch (desc->mode) {
+	case NL802154_KEY_ID_MODE_IMPLICIT:
+		if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT])
+			return -EINVAL;
+
+		if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT],
+						    &desc->device_addr) < 0)
+			return -EINVAL;
+		break;
+	case NL802154_KEY_ID_MODE_INDEX:
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_SHORT:
+		if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT])
+			return -EINVAL;
+
+		desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]);
+		break;
+	case NL802154_KEY_ID_MODE_INDEX_EXTENDED:
+		if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED])
+			return -EINVAL;
+
+		desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) {
+		if (!attrs[NL802154_KEY_ID_ATTR_INDEX])
+			return -EINVAL;
+
+		/* TODO change id to idx */
+		desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]);
+	}
+
+	return 0;
+}
+
+static int nl802154_set_llsec_params(struct sk_buff *skb,
+				     struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_params params;
+	u32 changed = 0;
+	int ret;
+
+	if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
+		u8 enabled;
+
+		enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+		if (enabled != 0 && enabled != 1)
+			return -EINVAL;
+
+		params.enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]);
+		changed |= IEEE802154_LLSEC_PARAM_ENABLED;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) {
+		ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID],
+						    &params.out_key);
+		if (ret < 0)
+			return ret;
+
+		changed |= IEEE802154_LLSEC_PARAM_OUT_KEY;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) {
+		params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]);
+		if (params.out_level > NL802154_SECLEVEL_MAX)
+			return -EINVAL;
+
+		changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL;
+	}
+
+	if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) {
+		params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]);
+		changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER;
+	}
+
+	return rdev_set_llsec_params(rdev, wpan_dev, &params, changed);
+}
+
+static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
+			     u32 seq, int flags,
+			     struct cfg802154_registered_device *rdev,
+			     struct net_device *dev,
+			     const struct ieee802154_llsec_key_entry *key)
+{
+	void *hdr;
+	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32];
+	struct nlattr *nl_key, *nl_key_id;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY);
+	if (!nl_key)
+		goto nla_put_failure;
+
+	nl_key_id = nla_nest_start(msg, NL802154_KEY_ATTR_ID);
+	if (!nl_key_id)
+		goto nla_put_failure;
+
+	if (ieee802154_llsec_send_key_id(msg, &key->id) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key_id);
+
+	if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES,
+		       key->key->frame_types))
+		goto nla_put_failure;
+
+	if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) {
+		/* TODO for each nested */
+		memset(commands, 0, sizeof(commands));
+		commands[7] = key->key->cmd_frame_ids;
+		if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS,
+			    sizeof(commands), commands))
+			goto nla_put_failure;
+	}
+
+	if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE,
+		    key->key->key))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_key_entry *key;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(key, &table->keys, list) {
+		if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY,
+				      NETLINK_CB(cb->skb).portid,
+				      cb->nlh->nlmsg_seq, NLM_F_MULTI,
+				      rdev, wpan_dev->netdev, key) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = {
+	[NL802154_KEY_ATTR_ID] = { .type = NLA_NESTED },
+	/* TODO handle it as for_each_nested and NLA_FLAG? */
+	[NL802154_KEY_ATTR_USAGE_FRAMES] = { .type = NLA_U8 },
+	/* TODO handle it as for_each_nested, not static array? */
+	[NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 },
+	[NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE },
+};
+
+static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key key = { };
+	struct ieee802154_llsec_key_id id = { };
+	u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
+	    !attrs[NL802154_KEY_ATTR_BYTES])
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]);
+	if (key.frame_types > BIT(NL802154_FRAME_MAX) ||
+	    ((key.frame_types & BIT(NL802154_FRAME_CMD)) &&
+	     !attrs[NL802154_KEY_ATTR_USAGE_CMDS]))
+		return -EINVAL;
+
+	if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) {
+		/* TODO for each nested */
+		nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS],
+			   NL802154_CMD_FRAME_NR_IDS / 8);
+
+		/* TODO understand the -EINVAL logic here? last condition */
+		if (commands[0] || commands[1] || commands[2] || commands[3] ||
+		    commands[4] || commands[5] || commands[6] ||
+		    commands[7] > BIT(NL802154_CMD_FRAME_MAX))
+			return -EINVAL;
+
+		key.cmd_frame_ids = commands[7];
+	} else {
+		key.cmd_frame_ids = 0;
+	}
+
+	nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE);
+
+	return rdev_add_llsec_key(rdev, wpan_dev, &id, &key);
+}
+
+static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_key_id id;
+
+	if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_KEY],
+			     nl802154_key_policy))
+		return -EINVAL;
+
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
+		return -ENOBUFS;
+
+	return rdev_del_llsec_key(rdev, wpan_dev, &id);
+}
+
+static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev,
+				const struct ieee802154_llsec_device *dev_desc)
+{
+	void *hdr;
+	struct nlattr *nl_device;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_device = nla_nest_start(msg, NL802154_ATTR_SEC_DEVICE);
+	if (!nl_device)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_DEV_ATTR_FRAME_COUNTER,
+			dev_desc->frame_counter) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) ||
+	    nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR,
+			 dev_desc->short_addr) ||
+	    nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR,
+			 dev_desc->hwaddr) ||
+	    nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT,
+		       dev_desc->seclevel_exempt) ||
+	    nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_device);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device *dev;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(dev, &table->devices, list) {
+		if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					 NETLINK_CB(cb->skb).portid,
+					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					 rdev, wpan_dev->netdev, dev) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = {
+	[NL802154_DEV_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
+	[NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 },
+	[NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+	[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { .type = NLA_U8 },
+	[NL802154_DEV_ATTR_KEY_MODE] = { .type = NLA_U32 },
+};
+
+static int
+ieee802154_llsec_parse_device(struct nlattr *nla,
+			      struct ieee802154_llsec_device *dev)
+{
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla,
+				     nl802154_dev_policy))
+		return -EINVAL;
+
+	memset(dev, 0, sizeof(*dev));
+
+	if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEV_ATTR_PAN_ID] ||
+	    !attrs[NL802154_DEV_ATTR_SHORT_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] ||
+	    !attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] ||
+	    !attrs[NL802154_DEV_ATTR_KEY_MODE])
+		return -EINVAL;
+
+	/* TODO be32 */
+	dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]);
+	dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]);
+	dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]);
+	/* TODO rename hwaddr to extended_addr */
+	dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]);
+	dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]);
+
+	if (dev->key_mode > NL802154_DEVKEY_MAX ||
+	    (dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_device dev_desc;
+
+	if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
+					  &dev_desc) < 0)
+		return -EINVAL;
+
+	return rdev_add_device(rdev, wpan_dev, &dev_desc);
+}
+
+static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVICE],
+			     nl802154_dev_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]);
+	return rdev_del_device(rdev, wpan_dev, extended_addr);
+}
+
+static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
+				u32 seq, int flags,
+				struct cfg802154_registered_device *rdev,
+				struct net_device *dev, __le64 extended_addr,
+				const struct ieee802154_llsec_device_key *devkey)
+{
+	void *hdr;
+	struct nlattr *nl_devkey, *nl_key_id;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_devkey = nla_nest_start(msg, NL802154_ATTR_SEC_DEVKEY);
+	if (!nl_devkey)
+		goto nla_put_failure;
+
+	if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR,
+			 extended_addr) ||
+	    nla_put_u32(msg, NL802154_DEVKEY_ATTR_FRAME_COUNTER,
+			devkey->frame_counter))
+		goto nla_put_failure;
+
+	nl_key_id = nla_nest_start(msg, NL802154_DEVKEY_ATTR_ID);
+	if (!nl_key_id)
+		goto nla_put_failure;
+
+	if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(msg, nl_key_id);
+	nla_nest_end(msg, nl_devkey);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_device_key *kpos;
+	struct ieee802154_llsec_device *dpos;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	/* TODO consider removing devkey and using a nested attribute */
+	list_for_each_entry(dpos, &table->devices, list) {
+		list_for_each_entry(kpos, &dpos->keys, list) {
+			if (nl802154_send_devkey(skb,
+						 NL802154_CMD_NEW_SEC_LEVEL,
+						 NETLINK_CB(cb->skb).portid,
+						 cb->nlh->nlmsg_seq,
+						 NLM_F_MULTI, rdev,
+						 wpan_dev->netdev,
+						 dpos->hwaddr,
+						 kpos) < 0) {
+				/* TODO */
+				err = -EIO;
+				rdev_unlock_llsec_table(rdev, wpan_dev);
+				goto out_err;
+			}
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = {
+	[NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { .type = NLA_U32 },
+	[NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 },
+	[NL802154_DEVKEY_ATTR_ID] = { .type = NLA_NESTED },
+};
+
+static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+	    nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy) < 0)
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] ||
+	    !attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO be32 */
+	key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]);
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1];
+	struct ieee802154_llsec_device_key key;
+	__le64 extended_addr;
+
+	if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX,
+			     info->attrs[NL802154_ATTR_SEC_DEVKEY],
+			     nl802154_devkey_policy))
+		return -EINVAL;
+
+	if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
+		return -EINVAL;
+
+	/* TODO change key.id ? */
+	if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID],
+					  &key.key_id) < 0)
+		return -ENOBUFS;
+
+	/* TODO change naming hwaddr -> extended_addr
+	 * check unique identifier short+pan OR extended_addr
+	 */
+	extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]);
+	return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key);
+}
+
+static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
+				  u32 seq, int flags,
+				  struct cfg802154_registered_device *rdev,
+				  struct net_device *dev,
+				  const struct ieee802154_llsec_seclevel *sl)
+{
+	void *hdr;
+	struct nlattr *nl_seclevel;
+
+	hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
+		goto nla_put_failure;
+
+	nl_seclevel = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL);
+	if (!nl_seclevel)
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) ||
+	    nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) ||
+	    nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE,
+		       sl->device_override))
+		goto nla_put_failure;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME,
+				sl->cmd_frame_id))
+			goto nla_put_failure;
+	}
+
+	nla_nest_end(msg, nl_seclevel);
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int
+nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct cfg802154_registered_device *rdev = NULL;
+	struct ieee802154_llsec_seclevel *sl;
+	struct ieee802154_llsec_table *table;
+	struct wpan_dev *wpan_dev;
+	int err;
+
+	err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev);
+	if (err)
+		return err;
+
+	if (!wpan_dev->netdev) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	rdev_lock_llsec_table(rdev, wpan_dev);
+	rdev_get_llsec_table(rdev, wpan_dev, &table);
+
+	/* TODO make it like station dump */
+	if (cb->args[2])
+		goto out;
+
+	list_for_each_entry(sl, &table->security_levels, list) {
+		if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL,
+					   NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   rdev, wpan_dev->netdev, sl) < 0) {
+			/* TODO */
+			err = -EIO;
+			rdev_unlock_llsec_table(rdev, wpan_dev);
+			goto out_err;
+		}
+	}
+
+	cb->args[2] = 1;
+
+out:
+	rdev_unlock_llsec_table(rdev, wpan_dev);
+	err = skb->len;
+out_err:
+	nl802154_finish_wpan_dev_dump(rdev);
+
+	return err;
+}
+
+static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = {
+	[NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 },
+	[NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 },
+	[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 },
+};
+
+static int
+llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl)
+{
+	struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1];
+
+	if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla,
+				     nl802154_seclevel_policy))
+		return -EINVAL;
+
+	memset(sl, 0, sizeof(*sl));
+
+	if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_FRAME] ||
+	    !attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE])
+		return -EINVAL;
+
+	sl->sec_levels = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]);
+	sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]);
+	sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]);
+	if (sl->frame_type > NL802154_FRAME_MAX ||
+	    (sl->device_override != 0 && sl->device_override != 1))
+		return -EINVAL;
+
+	if (sl->frame_type == NL802154_FRAME_CMD) {
+		if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME])
+			return -EINVAL;
+
+		sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]);
+		if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_add_seclevel(rdev, wpan_dev, &sl);
+}
+
+static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	struct ieee802154_llsec_seclevel sl;
+
+	if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
+	    llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
+				 &sl) < 0)
+		return -EINVAL;
+
+	return rdev_del_seclevel(rdev, wpan_dev, &sl);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #define NL802154_FLAG_NEED_WPAN_PHY	0x01
 #define NL802154_FLAG_NEED_NETDEV	0x02
 #define NL802154_FLAG_NEED_RTNL		0x04
@@ -1289,6 +2303,119 @@
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	{
+		.cmd = NL802154_CMD_SET_SEC_PARAMS,
+		.doit = nl802154_set_llsec_params,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_KEY,
+		/* TODO .doit by matching key id? */
+		.dumpit = nl802154_dump_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_KEY,
+		.doit = nl802154_add_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_KEY,
+		.doit = nl802154_del_llsec_key,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO unique identifier must be short+pan OR extended_addr */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEV,
+		/* TODO .doit by matching extended_addr? */
+		.dumpit = nl802154_dump_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEV,
+		.doit = nl802154_add_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEV,
+		.doit = nl802154_del_llsec_dev,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	/* TODO remove complete devkey, put it as nested? */
+	{
+		.cmd = NL802154_CMD_GET_SEC_DEVKEY,
+		/* TODO doit by matching ??? */
+		.dumpit = nl802154_dump_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_DEVKEY,
+		.doit = nl802154_add_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_DEVKEY,
+		.doit = nl802154_del_llsec_devkey,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_GET_SEC_LEVEL,
+		/* TODO .doit by matching frame_type? */
+		.dumpit = nl802154_dump_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_NEW_SEC_LEVEL,
+		.doit = nl802154_add_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL802154_CMD_DEL_SEC_LEVEL,
+		/* TODO match frame_type only? */
+		.doit = nl802154_del_llsec_seclevel,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
 
 /* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index 03b3575..4441c63 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -208,4 +208,113 @@
 	return ret;
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+/* TODO this is nl802154 specific already, so move it into ieee802154 */
+static inline void
+rdev_get_llsec_table(struct cfg802154_registered_device *rdev,
+		     struct wpan_dev *wpan_dev,
+		     struct ieee802154_llsec_table **table)
+{
+	rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table);
+}
+
+static inline void
+rdev_lock_llsec_table(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev)
+{
+	rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline void
+rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev,
+			struct wpan_dev *wpan_dev)
+{
+	rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev);
+}
+
+static inline int
+rdev_get_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      struct ieee802154_llsec_params *params)
+{
+	return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params);
+}
+
+static inline int
+rdev_set_llsec_params(struct cfg802154_registered_device *rdev,
+		      struct wpan_dev *wpan_dev,
+		      const struct ieee802154_llsec_params *params,
+		      u32 changed)
+{
+	return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params,
+					   changed);
+}
+
+static inline int
+rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id,
+		   const struct ieee802154_llsec_key *key)
+{
+	return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key);
+}
+
+static inline int
+rdev_del_llsec_key(struct cfg802154_registered_device *rdev,
+		   struct wpan_dev *wpan_dev,
+		   const struct ieee802154_llsec_key_id *id)
+{
+	return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, id);
+}
+
+static inline int
+rdev_add_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_del_seclevel(struct cfg802154_registered_device *rdev,
+		  struct wpan_dev *wpan_dev,
+		  const struct ieee802154_llsec_seclevel *sl)
+{
+	return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl);
+}
+
+static inline int
+rdev_add_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev,
+		const struct ieee802154_llsec_device *dev_desc)
+{
+	return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc);
+}
+
+static inline int
+rdev_del_device(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr)
+{
+	return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr);
+}
+
+static inline int
+rdev_add_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
+
+static inline int
+rdev_del_devkey(struct cfg802154_registered_device *rdev,
+		struct wpan_dev *wpan_dev, __le64 extended_addr,
+		const struct ieee802154_llsec_device_key *devkey)
+{
+	return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr,
+				     devkey);
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 #endif /* __CFG802154_RDEV_OPS */
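
Note that nl802154.c checks some ops for NULL before calling them (see the
add_virtual_intf check earlier in this diff), but the llsec wrappers above
invoke the driver op unconditionally, so with the experimental option enabled
a cfg802154 driver must implement the full llsec ops set. A defensive variant
would look like the following sketch (an illustration of the trade-off, not
what this patch does):

	static inline int
	rdev_add_llsec_key(struct cfg802154_registered_device *rdev,
			   struct wpan_dev *wpan_dev,
			   const struct ieee802154_llsec_key_id *id,
			   const struct ieee802154_llsec_key *key)
	{
		if (!rdev->ops->add_llsec_key)
			return -EOPNOTSUPP;
		return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev,
						id, key);
	}
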
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index b6eacf3..a548be2 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -273,7 +273,7 @@
 		goto out;
 	}
 
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -637,7 +637,7 @@
 		err = -ENXIO;
 		goto out;
 	}
-	mtu = dev->mtu;
+	mtu = IEEE802154_MTU;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
 	if (size > mtu) {
@@ -676,8 +676,8 @@
 	cb->seclevel = ro->seclevel;
 	cb->seclevel_override = ro->seclevel_override;
 
-	err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr,
-			      ro->bound ? &ro->src_addr : NULL, size);
+	err = wpan_dev_hard_header(skb, dev, &dst_addr,
+				   ro->bound ? &ro->src_addr : NULL, size);
 	if (err < 0)
 		goto out_skb;
 
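
Pinning the size check to IEEE802154_MTU (127 bytes, the 802.15.4
aMaxPHYPacketSize) instead of dev->mtu matters because dev->mtu on these
interfaces no longer describes the raw frame limit that AF_IEEE802154 sockets
operate on. The resulting bound, sketched in the shape these send paths use:

	/* A raw/dgram send is capped at the PHY frame size, whatever
	 * dev->mtu has been configured to.
	 */
	if (size > IEEE802154_MTU) {
		err = -EMSGSIZE;
		goto out;
	}
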
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 89aacb6..c29809f 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,6 +8,7 @@
 	     inet_timewait_sock.o inet_connection_sock.o \
 	     tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
 	     tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
+	     tcp_recovery.o \
 	     tcp_offload.o datagram.o raw.o udp.o udplite.o \
 	     udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
 	     fib_frontend.o fib_semantics.o fib_trie.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8a55664..11c4ca1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -119,7 +119,7 @@
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -219,17 +219,13 @@
 		 * shutdown() (rather than close()).
 		 */
 		if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-		    !inet_csk(sk)->icsk_accept_queue.fastopenq) {
+		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
 			if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
-				err = fastopen_init_queue(sk, backlog);
+				fastopen_queue_tune(sk, backlog);
 			else if ((sysctl_tcp_fastopen &
 				  TFO_SERVER_WO_SOCKOPT2) != 0)
-				err = fastopen_init_queue(sk,
+				fastopen_queue_tune(sk,
 				    ((uint)sysctl_tcp_fastopen) >> 16);
-			else
-				err = 0;
-			if (err)
-				goto out;
 
 			tcp_fastopen_init_key_once(true);
 		}
@@ -450,7 +446,7 @@
 			goto out;
 	}
 
-	tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
 	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
 	/* Not specified by any standard per-se, however it breaks too
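
The >> 16 above works because the tcp_fastopen sysctl packs a default server
queue length into its upper 16 bits for the TFO_SERVER_WO_SOCKOPT2 mode, while
the low bits carry the enable flags. A worked example (flag values as in
include/net/tcp.h of this era):

	/* sysctl_tcp_fastopen = 0x000A0802
	 *   low 16 bits:  0x0802 = TFO_SERVER_WO_SOCKOPT2 | TFO_SERVER_ENABLE
	 *   high 16 bits: 0x000A = 10
	 * so the listener gets a fastopen queue of 10 without any
	 * TCP_FASTOPEN setsockopt from the application:
	 */
	fastopen_queue_tune(sk, ((uint)sysctl_tcp_fastopen) >> 16);
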
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 01308e6..59b3e0e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -312,7 +312,7 @@
 	if (!skb)
 		return;
 
-	skb_dst_set(skb, dst);
+	skb_dst_set(skb, dst_clone(dst));
 	arp_xmit(skb);
 }
 
@@ -384,7 +384,7 @@
 	}
 
 	if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-		dst = dst_clone(skb_dst(skb));
+		dst = skb_dst(skb);
 	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
 		     dst_hw, dev->dev_addr, NULL, dst);
 }
@@ -816,7 +816,7 @@
 				} else {
 					pneigh_enqueue(&arp_tbl,
 						       in_dev->arp_parms, skb);
-					return 0;
+					goto out_free_dst;
 				}
 				goto out;
 			}
@@ -870,6 +870,8 @@
 
 out:
 	consume_skb(skb);
+out_free_dst:
+	dst_release(reply_dst);
 	return 0;
 }
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7350084..cebd9d3 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1644,7 +1644,8 @@
 		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
 }
 
-static size_t inet_get_link_af_size(const struct net_device *dev)
+static size_t inet_get_link_af_size(const struct net_device *dev,
+				    u32 ext_filter_mask)
 {
 	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 
@@ -2398,4 +2399,3 @@
 	rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
 		      inet_netconf_dump_devconf, NULL);
 }
-
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6fcbd21..e786873 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -45,7 +45,7 @@
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 #include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -255,7 +255,7 @@
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr)
 {
-	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
 
 	return __inet_dev_addr_type(net, dev, addr, rt_table);
 }
@@ -268,7 +268,7 @@
 				      const struct net_device *dev,
 				      __be32 addr)
 {
-	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+	u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
 
 	return __inet_dev_addr_type(net, NULL, addr, rt_table);
 }
@@ -332,7 +332,7 @@
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+	fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
 	if (!fl4.flowi4_iif)
 		fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
 	fl4.daddr = src;
@@ -340,6 +340,7 @@
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
 	fl4.flowi4_tun_key.tun_id = 0;
+	fl4.flowi4_flags = 0;
 
 	no_addr = idev->ifa_list == NULL;
 
@@ -366,7 +367,7 @@
 		if (nh->nh_dev == dev) {
 			dev_match = true;
 			break;
-		} else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+		} else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
 			dev_match = true;
 			break;
 		}
@@ -803,7 +804,7 @@
 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
 {
 	struct net *net = dev_net(ifa->ifa_dev->dev);
-	u32 tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
+	u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
 	struct fib_table *tb;
 	struct fib_config cfg = {
 		.fc_protocol = RTPROT_KERNEL,
@@ -866,9 +867,10 @@
 
 	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
 	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
-		fib_magic(RTM_NEWROUTE,
-			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
-			  prefix, ifa->ifa_prefixlen, prim);
+		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+			fib_magic(RTM_NEWROUTE,
+				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+				  prefix, ifa->ifa_prefixlen, prim);
 
 		/* Add network specific broadcasts, when it takes a sense */
 		if (ifa->ifa_prefixlen < 31) {
@@ -913,9 +915,10 @@
 		}
 	} else if (!ipv4_is_zeronet(any) &&
 		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
-		fib_magic(RTM_DELROUTE,
-			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
-			  any, ifa->ifa_prefixlen, prim);
+		if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
+			fib_magic(RTM_DELROUTE,
+				  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+				  any, ifa->ifa_prefixlen, prim);
 		subnet = 1;
 	}
 
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 064bd3c..42778d9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -57,8 +57,7 @@
 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-
-static DEFINE_SPINLOCK(fib_multipath_lock);
+u32 fib_multipath_secret __read_mostly;
 
 #define for_nexthops(fi) {						\
 	int nhsel; const struct fib_nh *nh;				\
@@ -532,7 +531,67 @@
 	return ret;
 }
 
-#endif
+static void fib_rebalance(struct fib_info *fi)
+{
+	int total;
+	int w;
+	struct in_device *in_dev;
+
+	if (fi->fib_nhs < 2)
+		return;
+
+	total = 0;
+	for_nexthops(fi) {
+		if (nh->nh_flags & RTNH_F_DEAD)
+			continue;
+
+		in_dev = __in_dev_get_rtnl(nh->nh_dev);
+
+		if (in_dev &&
+		    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+		    nh->nh_flags & RTNH_F_LINKDOWN)
+			continue;
+
+		total += nh->nh_weight;
+	} endfor_nexthops(fi);
+
+	w = 0;
+	change_nexthops(fi) {
+		int upper_bound;
+
+		in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
+
+		if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
+			upper_bound = -1;
+		} else if (in_dev &&
+			   IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+			   nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
+			upper_bound = -1;
+		} else {
+			w += nexthop_nh->nh_weight;
+			upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
+							    total) - 1;
+		}
+
+		atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
+	} endfor_nexthops(fi);
+
+	net_get_random_once(&fib_multipath_secret,
+			    sizeof(fib_multipath_secret));
+}
+
+static inline void fib_add_weight(struct fib_info *fi,
+				  const struct fib_nh *nh)
+{
+	fi->fib_weight += nh->nh_weight;
+}
+
+#else /* CONFIG_IP_ROUTE_MULTIPATH */
+
+#define fib_rebalance(fi) do { } while (0)
+#define fib_add_weight(fi, nh) do { } while (0)
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
 static int fib_encap_match(struct net *net, u16 encap_type,
 			   struct nlattr *encap,
@@ -1094,8 +1153,11 @@
 
 	change_nexthops(fi) {
 		fib_info_update_nh_saddr(net, nexthop_nh);
+		fib_add_weight(fi, nexthop_nh);
 	} endfor_nexthops(fi)
 
+	fib_rebalance(fi);
+
 link_it:
 	ofi = fib_find_info(fi);
 	if (ofi) {
@@ -1317,12 +1379,6 @@
 					nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
 					break;
 				}
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-				spin_lock_bh(&fib_multipath_lock);
-				fi->fib_power -= nexthop_nh->nh_power;
-				nexthop_nh->nh_power = 0;
-				spin_unlock_bh(&fib_multipath_lock);
-#endif
 				dead++;
 			}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -1345,6 +1401,8 @@
 			}
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1467,20 +1525,15 @@
 			    !__in_dev_get_rtnl(dev))
 				continue;
 			alive++;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-			spin_lock_bh(&fib_multipath_lock);
-			nexthop_nh->nh_power = 0;
 			nexthop_nh->nh_flags &= ~nh_flags;
-			spin_unlock_bh(&fib_multipath_lock);
-#else
-			nexthop_nh->nh_flags &= ~nh_flags;
-#endif
 		} endfor_nexthops(fi)
 
 		if (alive > 0) {
 			fi->fib_flags &= ~nh_flags;
 			ret++;
 		}
+
+		fib_rebalance(fi);
 	}
 
 	return ret;
@@ -1488,62 +1541,40 @@
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
-/*
- * The algorithm is suboptimal, but it provides really
- * fair weighted route distribution.
- */
-void fib_select_multipath(struct fib_result *res)
+void fib_select_multipath(struct fib_result *res, int hash)
 {
 	struct fib_info *fi = res->fi;
-	struct in_device *in_dev;
-	int w;
 
-	spin_lock_bh(&fib_multipath_lock);
-	if (fi->fib_power <= 0) {
-		int power = 0;
-		change_nexthops(fi) {
-			in_dev = __in_dev_get_rcu(nexthop_nh->nh_dev);
-			if (nexthop_nh->nh_flags & RTNH_F_DEAD)
-				continue;
-			if (in_dev &&
-			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
-			    nexthop_nh->nh_flags & RTNH_F_LINKDOWN)
-				continue;
-			power += nexthop_nh->nh_weight;
-			nexthop_nh->nh_power = nexthop_nh->nh_weight;
-		} endfor_nexthops(fi);
-		fi->fib_power = power;
-		if (power <= 0) {
-			spin_unlock_bh(&fib_multipath_lock);
-			/* Race condition: route has just become dead. */
-			res->nh_sel = 0;
-			return;
-		}
-	}
+	for_nexthops(fi) {
+		if (hash > atomic_read(&nh->nh_upper_bound))
+			continue;
 
-
-	/* w should be random number [0..fi->fib_power-1],
-	 * it is pretty bad approximation.
-	 */
-
-	w = jiffies % fi->fib_power;
-
-	change_nexthops(fi) {
-		if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
-		    nexthop_nh->nh_power) {
-			w -= nexthop_nh->nh_power;
-			if (w <= 0) {
-				nexthop_nh->nh_power--;
-				fi->fib_power--;
-				res->nh_sel = nhsel;
-				spin_unlock_bh(&fib_multipath_lock);
-				return;
-			}
-		}
+		res->nh_sel = nhsel;
+		return;
 	} endfor_nexthops(fi);
 
 	/* Race condition: route has just become dead. */
 	res->nh_sel = 0;
-	spin_unlock_bh(&fib_multipath_lock);
 }
 #endif
+
+void fib_select_path(struct net *net, struct fib_result *res,
+		     struct flowi4 *fl4, int mp_hash)
+{
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+	if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+		if (mp_hash < 0)
+			mp_hash = fib_multipath_hash(fl4->saddr, fl4->daddr);
+		fib_select_multipath(res, mp_hash);
+	}
+	else
+#endif
+	if (!res->prefixlen &&
+	    res->table->tb_num_default > 1 &&
+	    res->type == RTN_UNICAST && !fl4->flowi4_oif)
+		fib_select_default(fl4, res);
+
+	if (!fl4->saddr)
+		fl4->saddr = FIB_RES_PREFSRC(net, *res);
+}
+EXPORT_SYMBOL_GPL(fib_select_path);
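
For reference, a minimal userspace sketch of the hash-threshold scheme the
hunks above introduce: fib_rebalance() precomputes a per-nexthop upper bound
over [0, 2^31) from the cumulative weights, and fib_select_multipath() picks
the first nexthop whose bound covers the flow hash. The helper names and the
plain-integer arithmetic here are illustrative stand-ins, not kernel code:

#include <stdint.h>
#include <stdio.h>

struct nh {
	int weight;
	int dead;
	int upper_bound;	/* like nh_upper_bound; -1 means unusable */
};

/* Mirror of fib_rebalance(): map cumulative weights onto [0, 2^31). */
static void rebalance(struct nh *nhs, int n)
{
	int64_t total = 0, w = 0;

	for (int i = 0; i < n; i++)
		if (!nhs[i].dead)
			total += nhs[i].weight;

	for (int i = 0; i < n; i++) {
		if (nhs[i].dead) {
			nhs[i].upper_bound = -1;
			continue;
		}
		w += nhs[i].weight;
		/* DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1 */
		nhs[i].upper_bound = (int)(((w << 31) + total / 2) / total - 1);
	}
}

/* Mirror of fib_select_multipath(): first bound covering the flow hash. */
static int select_nh(const struct nh *nhs, int n, int hash)
{
	for (int i = 0; i < n; i++)
		if (hash <= nhs[i].upper_bound)
			return i;
	return 0;	/* race: route has just become dead */
}

int main(void)
{
	struct nh nhs[2] = { { .weight = 1 }, { .weight = 2 } };

	rebalance(nhs, 2);
	/* weights 1:2 -> bounds 715827882 and 2147483647 (INT_MAX) */
	printf("bounds: %d %d\n", nhs[0].upper_bound, nhs[1].upper_bound);
	printf("hash 0x10000000 -> nh%d\n", select_nh(nhs, 2, 0x10000000));
	printf("hash 0x60000000 -> nh%d\n", select_nh(nhs, 2, 0x60000000));
	return 0;
}

With weights 1:2 the bounds land at 715827882 and INT_MAX, so hashes split
roughly 1/3 : 2/3 across the two nexthops, and selection is now lockless: a
dead nexthop keeps bound -1 and is simply never matched.
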
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5eb8ac..36e2697 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -96,7 +96,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 /*
  *	Build xmit assembly blocks
@@ -309,7 +309,7 @@
 
 	rc = false;
 	if (icmp_global_allow()) {
-		int vif = vrf_master_ifindex(dst->dev);
+		int vif = l3mdev_master_ifindex(dst->dev);
 		struct inet_peer *peer;
 
 		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
@@ -427,7 +427,7 @@
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
-	fl4.flowi4_oif = vrf_master_ifindex(skb->dev);
+	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
@@ -440,6 +440,22 @@
 	icmp_xmit_unlock(sk);
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+/* Source and destination are swapped. See ip_multipath_icmp_hash */
+static int icmp_multipath_hash_skb(const struct sk_buff *skb)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+
+	return fib_multipath_hash(iph->daddr, iph->saddr);
+}
+
+#else
+
+#define icmp_multipath_hash_skb(skb) (-1)
+
+#endif
+
 static struct rtable *icmp_route_lookup(struct net *net,
 					struct flowi4 *fl4,
 					struct sk_buff *skb_in,
@@ -461,10 +477,11 @@
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
-	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev);
+	fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev);
 
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
-	rt = __ip_route_output_key(net, fl4);
+	rt = __ip_route_output_key_hash(net, fl4,
+					icmp_multipath_hash_skb(skb_in));
 	if (IS_ERR(rt))
 		return rt;
 
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d38b8b6..64aaf35 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -397,7 +397,7 @@
 
 	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
 
-	return ip_local_out(skb);
+	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 }
 
 static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
@@ -739,7 +739,7 @@
 	ih->group = group;
 	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
 
-	return ip_local_out(skb);
+	return ip_local_out(net, skb->sk, skb);
 }
 
 static void igmp_gq_timer_expire(unsigned long data)
@@ -2569,7 +2569,7 @@
 }
 
 /* called with rcu_read_lock() */
-int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
+int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u8 proto)
 {
 	struct ip_mc_list *im;
 	struct ip_mc_list __rcu **mc_hash;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ba2f90d..1feb15f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -330,14 +330,12 @@
 		if (error)
 			goto out_err;
 	}
-	req = reqsk_queue_remove(queue);
+	req = reqsk_queue_remove(queue, sk);
 	newsk = req->sk;
 
-	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
-	    tcp_rsk(req)->tfo_listener &&
-	    queue->fastopenq) {
-		spin_lock_bh(&queue->fastopenq->lock);
+	    tcp_rsk(req)->tfo_listener) {
+		spin_lock_bh(&queue->fastopenq.lock);
 		if (tcp_rsk(req)->tfo_listener) {
 			/* We are still waiting for the final ACK from 3WHS
 			 * so can't free req now. Instead, we set req->sk to
@@ -348,7 +346,7 @@
 			req->sk = NULL;
 			req = NULL;
 		}
-		spin_unlock_bh(&queue->fastopenq->lock);
+		spin_unlock_bh(&queue->fastopenq.lock);
 	}
 out:
 	release_sock(sk);
@@ -439,7 +437,7 @@
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
-struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
 					    struct sock *newsk,
 					    const struct request_sock *req)
 {
@@ -478,65 +476,12 @@
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 
-static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
-				 const u32 rnd, const u32 synq_hsize)
-{
-	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
 #define AF_INET_FAMILY(fam) true
 #endif
 
-/* Note: this is temporary :
- * req sock will no longer be in listener hash table
-*/
-struct request_sock *inet_csk_search_req(struct sock *sk,
-					 const __be16 rport,
-					 const __be32 raddr,
-					 const __be32 laddr)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	struct request_sock *req;
-	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
-				  lopt->nr_table_entries);
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-
-		if (ireq->ir_rmt_port == rport &&
-		    ireq->ir_rmt_addr == raddr &&
-		    ireq->ir_loc_addr == laddr &&
-		    AF_INET_FAMILY(req->rsk_ops->family)) {
-			atomic_inc(&req->rsk_refcnt);
-			WARN_ON(req->sk);
-			break;
-		}
-	}
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return req;
-}
-EXPORT_SYMBOL_GPL(inet_csk_search_req);
-
-void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
-				   unsigned long timeout)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
-				     inet_rsk(req)->ir_rmt_port,
-				     lopt->hash_rnd, lopt->nr_table_entries);
-
-	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
-	inet_csk_reqsk_queue_added(sk, timeout);
-}
-EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
-
 /* Only thing we need from tcp.h */
 extern int sysctl_tcp_synack_retries;
 
@@ -573,26 +518,20 @@
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
-/* return true if req was found in the syn_table[] */
+/* return true if req was found in the ehash table */
 static bool reqsk_queue_unlink(struct request_sock_queue *queue,
 			       struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-	struct request_sock **prev;
+	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
 	bool found = false;
 
-	spin_lock(&queue->syn_wait_lock);
+	if (sk_hashed(req_to_sk(req))) {
+		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
 
-	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
-	     prev = &(*prev)->dl_next) {
-		if (*prev == req) {
-			*prev = req->dl_next;
-			found = true;
-			break;
-		}
+		spin_lock(lock);
+		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
+		spin_unlock(lock);
 	}
-
-	spin_unlock(&queue->syn_wait_lock);
 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
 		reqsk_put(req);
 	return found;
@@ -607,21 +546,25 @@
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
+void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
+{
+	inet_csk_reqsk_queue_drop(sk, req);
+	reqsk_put(req);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
+
 static void reqsk_timer_handler(unsigned long data)
 {
 	struct request_sock *req = (struct request_sock *)data;
 	struct sock *sk_listener = req->rsk_listener;
 	struct inet_connection_sock *icsk = inet_csk(sk_listener);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct listen_sock *lopt = queue->listen_opt;
 	int qlen, expire = 0, resend = 0;
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
-		reqsk_put(req);
-		return;
-	}
+	if (sk_listener->sk_state != TCP_LISTEN)
+		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
 	thresh = max_retries;
@@ -642,9 +585,9 @@
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
-	qlen = listen_sock_qlen(lopt);
-	if (qlen >> (lopt->max_qlen_log - 1)) {
-		int young = listen_sock_young(lopt) << 1;
+	qlen = reqsk_queue_len(queue);
+	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
+		int young = reqsk_queue_len_young(queue) << 1;
 
 		while (thresh > 2) {
 			if (qlen < young)
@@ -666,41 +609,40 @@
 		unsigned long timeo;
 
 		if (req->num_timeout++ == 0)
-			atomic_inc(&lopt->young_dec);
+			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
 		return;
 	}
-	inet_csk_reqsk_queue_drop(sk_listener, req);
-	reqsk_put(req);
+drop:
+	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
 }
 
-void reqsk_queue_hash_req(struct request_sock_queue *queue,
-			  u32 hash, struct request_sock *req,
-			  unsigned long timeout)
+static void reqsk_queue_hash_req(struct request_sock *req,
+				 unsigned long timeout)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	req->num_retrans = 0;
 	req->num_timeout = 0;
 	req->sk = NULL;
 
 	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
 	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
-	req->rsk_hash = hash;
 
+	inet_ehash_insert(req_to_sk(req), NULL);
 	/* before letting lookups find us, make sure all req fields
 	 * are committed to memory and refcnt initialized.
 	 */
 	smp_wmb();
-	atomic_set(&req->rsk_refcnt, 2);
-
-	spin_lock(&queue->syn_wait_lock);
-	req->dl_next = lopt->syn_table[hash];
-	lopt->syn_table[hash] = req;
-	spin_unlock(&queue->syn_wait_lock);
+	atomic_set(&req->rsk_refcnt, 2 + 1);
 }
-EXPORT_SYMBOL(reqsk_queue_hash_req);
+
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+				   unsigned long timeout)
+{
+	reqsk_queue_hash_req(req, timeout);
+	inet_csk_reqsk_queue_added(sk);
+}
+EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
 /**
  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
@@ -791,16 +733,14 @@
 }
 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
 
-int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
+int inet_csk_listen_start(struct sock *sk, int backlog)
 {
-	struct inet_sock *inet = inet_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
+	struct inet_sock *inet = inet_sk(sk);
 
-	if (rc != 0)
-		return rc;
+	reqsk_queue_alloc(&icsk->icsk_accept_queue);
 
-	sk->sk_max_ack_backlog = 0;
+	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
 	inet_csk_delack_init(sk);
 
@@ -820,11 +760,76 @@
 	}
 
 	sk->sk_state = TCP_CLOSE;
-	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
 	return -EADDRINUSE;
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+	sock_orphan(child);
+
+	percpu_counter_inc(sk->sk_prot->orphan_count);
+
+	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+		BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+		BUG_ON(sk != req->rsk_listener);
+
+		/* Paranoid, to prevent race condition if
+		 * an inbound pkt destined for child is
+		 * blocked by sock lock in tcp_v4_rcv().
+		 * Also to satisfy an assertion in
+		 * tcp_v4_destroy_sock().
+		 */
+		tcp_sk(child)->fastopen_rsk = NULL;
+	}
+	inet_csk_destroy_sock(child);
+	reqsk_put(req);
+}
+
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+			      struct sock *child)
+{
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	spin_lock(&queue->rskq_lock);
+	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+		inet_child_forget(sk, req, child);
+	} else {
+		req->sk = child;
+		req->dl_next = NULL;
+		if (queue->rskq_accept_head == NULL)
+			queue->rskq_accept_head = req;
+		else
+			queue->rskq_accept_tail->dl_next = req;
+		queue->rskq_accept_tail = req;
+		sk_acceptq_added(sk);
+	}
+	spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
+struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
+					 struct request_sock *req, bool own_req)
+{
+	if (own_req) {
+		inet_csk_reqsk_queue_drop(sk, req);
+		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+		inet_csk_reqsk_queue_add(sk, req, child);
+		/* Warning: caller must not call reqsk_put(req);
+		 * child stole last reference on it.
+		 */
+		return child;
+	}
+	/* Too bad, another child took ownership of the request, undo. */
+	bh_unlock_sock(child);
+	sock_put(child);
+	return NULL;
+}
+EXPORT_SYMBOL(inet_csk_complete_hashdance);
+
 /*
  *	This routine closes sockets which have been at least partially
  *	opened, but not yet accepted.
@@ -833,11 +838,7 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-	struct request_sock *acc_req;
-	struct request_sock *req;
-
-	/* make all the listen_opt local to us */
-	acc_req = reqsk_queue_yank_acceptq(queue);
+	struct request_sock *next, *req;
 
 	/* Following specs, it would be better either to send FIN
 	 * (and enter FIN-WAIT-1, it is normal close)
@@ -847,57 +848,34 @@
 	 * To be honest, we are not able to make either
 	 * of the variants now.			--ANK
 	 */
-	reqsk_queue_destroy(queue);
-
-	while ((req = acc_req) != NULL) {
+	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
 		struct sock *child = req->sk;
 
-		acc_req = req->dl_next;
-
 		local_bh_disable();
 		bh_lock_sock(child);
 		WARN_ON(sock_owned_by_user(child));
 		sock_hold(child);
 
-		sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-		sock_orphan(child);
-
-		percpu_counter_inc(sk->sk_prot->orphan_count);
-
-		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-			BUG_ON(sk != req->rsk_listener);
-
-			/* Paranoid, to prevent race condition if
-			 * an inbound pkt destined for child is
-			 * blocked by sock lock in tcp_v4_rcv().
-			 * Also to satisfy an assertion in
-			 * tcp_v4_destroy_sock().
-			 */
-			tcp_sk(child)->fastopen_rsk = NULL;
-		}
-		inet_csk_destroy_sock(child);
-
+		inet_child_forget(sk, req, child);
 		bh_unlock_sock(child);
 		local_bh_enable();
 		sock_put(child);
 
-		sk_acceptq_removed(sk);
-		reqsk_put(req);
+		cond_resched();
 	}
-	if (queue->fastopenq) {
+	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
-		spin_lock_bh(&queue->fastopenq->lock);
-		acc_req = queue->fastopenq->rskq_rst_head;
-		queue->fastopenq->rskq_rst_head = NULL;
-		spin_unlock_bh(&queue->fastopenq->lock);
-		while ((req = acc_req) != NULL) {
-			acc_req = req->dl_next;
+		spin_lock_bh(&queue->fastopenq.lock);
+		req = queue->fastopenq.rskq_rst_head;
+		queue->fastopenq.rskq_rst_head = NULL;
+		spin_unlock_bh(&queue->fastopenq.lock);
+		while (req != NULL) {
+			next = req->dl_next;
 			reqsk_put(req);
+			req = next;
 		}
 	}
-	WARN_ON(sk->sk_ack_backlog);
+	WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
 
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c3b1f3a..ab9f8a6 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -730,91 +730,21 @@
 #endif
 }
 
-static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
-			       struct netlink_callback *cb,
-			       const struct inet_diag_req_v2 *r,
-			       const struct nlattr *bc)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct inet_sock *inet = inet_sk(sk);
-	struct inet_diag_entry entry;
-	int j, s_j, reqnum, s_reqnum;
-	struct listen_sock *lopt;
-	int err = 0;
-
-	s_j = cb->args[3];
-	s_reqnum = cb->args[4];
-
-	if (s_j > 0)
-		s_j--;
-
-	entry.family = sk->sk_family;
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	lopt = icsk->icsk_accept_queue.listen_opt;
-	if (!lopt || !listen_sock_qlen(lopt))
-		goto out;
-
-	if (bc) {
-		entry.sport = inet->inet_num;
-		entry.userlocks = sk->sk_userlocks;
-	}
-
-	for (j = s_j; j < lopt->nr_table_entries; j++) {
-		struct request_sock *req, *head = lopt->syn_table[j];
-
-		reqnum = 0;
-		for (req = head; req; reqnum++, req = req->dl_next) {
-			struct inet_request_sock *ireq = inet_rsk(req);
-
-			if (reqnum < s_reqnum)
-				continue;
-			if (r->id.idiag_dport != ireq->ir_rmt_port &&
-			    r->id.idiag_dport)
-				continue;
-
-			if (bc) {
-				/* Note: entry.sport and entry.userlocks are already set */
-				entry_fill_addrs(&entry, req_to_sk(req));
-				entry.dport = ntohs(ireq->ir_rmt_port);
-
-				if (!inet_diag_bc_run(bc, &entry))
-					continue;
-			}
-
-			err = inet_req_diag_fill(req_to_sk(req), skb,
-						 NETLINK_CB(cb->skb).portid,
-						 cb->nlh->nlmsg_seq,
-						 NLM_F_MULTI, cb->nlh);
-			if (err < 0) {
-				cb->args[3] = j + 1;
-				cb->args[4] = reqnum;
-				goto out;
-			}
-		}
-
-		s_reqnum = 0;
-	}
-
-out:
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return err;
-}
-
 void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 			 struct netlink_callback *cb,
 			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
 {
 	struct net *net = sock_net(skb->sk);
 	int i, num, s_i, s_num;
+	u32 idiag_states = r->idiag_states;
 
+	if (idiag_states & TCPF_SYN_RECV)
+		idiag_states |= TCPF_NEW_SYN_RECV;
 	s_i = cb->args[1];
 	s_num = num = cb->args[2];
 
 	if (cb->args[0] == 0) {
-		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
+		if (!(idiag_states & TCPF_LISTEN))
 			goto skip_listen_ht;
 
 		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
@@ -844,21 +774,11 @@
 				    r->id.idiag_sport)
 					goto next_listen;
 
-				if (!(r->idiag_states & TCPF_LISTEN) ||
-				    r->id.idiag_dport ||
+				if (r->id.idiag_dport ||
 				    cb->args[3] > 0)
-					goto syn_recv;
-
-				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
-					spin_unlock_bh(&ilb->lock);
-					goto done;
-				}
-
-syn_recv:
-				if (!(r->idiag_states & TCPF_SYN_RECV))
 					goto next_listen;
 
-				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
+				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
 					spin_unlock_bh(&ilb->lock);
 					goto done;
 				}
@@ -879,7 +799,7 @@
 		s_i = num = s_num = 0;
 	}
 
-	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
+	if (!(idiag_states & ~TCPF_LISTEN))
 		goto out;
 
 	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
@@ -906,7 +826,7 @@
 				goto next_normal;
 			state = (sk->sk_state == TCP_TIME_WAIT) ?
 				inet_twsk(sk)->tw_substate : sk->sk_state;
-			if (!(r->idiag_states & (1 << state)))
+			if (!(idiag_states & (1 << state)))
 				goto next_normal;
 			if (r->sdiag_family != AF_UNSPEC &&
 			    sk->sk_family != r->sdiag_family)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8912019..ccc5980 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -126,7 +126,7 @@
 }
 EXPORT_SYMBOL(inet_put_port);
 
-int __inet_inherit_port(struct sock *sk, struct sock *child)
+int __inet_inherit_port(const struct sock *sk, struct sock *child)
 {
 	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
 	unsigned short port = inet_sk(child)->inet_num;
@@ -137,6 +137,10 @@
 
 	spin_lock(&head->lock);
 	tb = inet_csk(sk)->icsk_bind_hash;
+	if (unlikely(!tb)) {
+		spin_unlock(&head->lock);
+		return -ENOENT;
+	}
 	if (tb->port != port) {
 		/* NOTE: using tproxy and redirecting skbs to a proxy
 		 * on a different listener port breaks the assumption
@@ -185,6 +189,8 @@
 				return -1;
 			score += 4;
 		}
+		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+			score++;
 	}
 	return score;
 }
@@ -398,14 +404,18 @@
 					  inet->inet_dport);
 }
 
-void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
+/* Insert a socket into ehash, and possibly remove another one
+ * (the other one can be a SYN_RECV or TIMEWAIT socket).
+ */
+bool inet_ehash_insert(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	struct inet_ehash_bucket *head;
 	spinlock_t *lock;
+	bool ret = true;
 
-	WARN_ON(!sk_unhashed(sk));
+	WARN_ON_ONCE(!sk_unhashed(sk));
 
 	sk->sk_hash = sk_ehashfn(sk);
 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
@@ -413,24 +423,41 @@
 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
 	spin_lock(lock);
-	__sk_nulls_add_node_rcu(sk, list);
 	if (osk) {
-		WARN_ON(sk->sk_hash != osk->sk_hash);
-		sk_nulls_del_node_init_rcu(osk);
+		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
+		ret = sk_nulls_del_node_init_rcu(osk);
 	}
+	if (ret)
+		__sk_nulls_add_node_rcu(sk, list);
 	spin_unlock(lock);
-	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
+
+bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
+{
+	bool ok = inet_ehash_insert(sk, osk);
+
+	if (ok) {
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+	} else {
+		percpu_counter_inc(sk->sk_prot->orphan_count);
+		sk->sk_state = TCP_CLOSE;
+		sock_set_flag(sk, SOCK_DEAD);
+		inet_csk_destroy_sock(sk);
+	}
+	return ok;
+}
+EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
 
 void __inet_hash(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct inet_listen_hashbucket *ilb;
 
-	if (sk->sk_state != TCP_LISTEN)
-		return __inet_hash_nolisten(sk, osk);
-
+	if (sk->sk_state != TCP_LISTEN) {
+		inet_ehash_nolisten(sk, osk);
+		return;
+	}
 	WARN_ON(!sk_unhashed(sk));
 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
 
@@ -551,7 +578,7 @@
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->inet_sport = htons(port);
-			__inet_hash_nolisten(sk, (struct sock *)tw);
+			inet_ehash_nolisten(sk, (struct sock *)tw);
 		}
 		if (tw)
 			inet_twsk_bind_unhash(tw, hinfo);
@@ -568,7 +595,7 @@
 	tb  = inet_csk(sk)->icsk_bind_hash;
 	spin_lock_bh(&head->lock);
 	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-		__inet_hash_nolisten(sk, NULL);
+		inet_ehash_nolisten(sk, NULL);
 		spin_unlock_bh(&head->lock);
 		return 0;
 	} else {
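
inet_ehash_insert() now reports whether the old entry was still present,
and inet_ehash_nolisten() destroys the newcomer when it was not, which
closes the race where two CPUs finish the same handshake and both try to
install a child. A generic userspace sketch of this insert-or-abort-
under-one-lock pattern; the list and lock types are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };
struct bucket { pthread_mutex_t lock; struct node *head; };

static bool unlink_node(struct bucket *b, struct node *old)
{
	for (struct node **pp = &b->head; *pp; pp = &(*pp)->next)
		if (*pp == old) {
			*pp = old->next;
			return true;
		}
	return false;
}

/* Returns false if @old was already gone; the caller must then dispose
 * of @new instead of using it (cf. inet_ehash_nolisten() above). */
static bool replace_in_bucket(struct bucket *b, struct node *new,
			      struct node *old)
{
	bool ok = true;

	pthread_mutex_lock(&b->lock);
	if (old)
		ok = unlink_node(b, old);
	if (ok) {		/* add the newcomer only on success */
		new->next = b->head;
		b->head = new;
	}
	pthread_mutex_unlock(&b->lock);
	return ok;
}

int main(void)
{
	struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct node a, c;

	replace_in_bucket(&b, &a, NULL);	/* plain insert */
	return !replace_in_bucket(&b, &c, &a);	/* swap a for c: succeeds */
}
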
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index d66cfb3..da0d7ce 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -72,7 +72,7 @@
 		ip_forward_options(skb);
 
 	skb_sender_cpu_clear(skb);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 int ip_forward(struct sk_buff *skb)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index fa7f153..5482745 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -48,7 +48,7 @@
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/inet_ecn.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -78,7 +78,7 @@
 	u8		ecn; /* RFC3168 support */
 	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
-	int             vif;   /* VRF device index */
+	int             vif;   /* L3 master device index */
 	unsigned int    rid;
 	struct inet_peer *peer;
 };
@@ -654,11 +654,10 @@
 }
 
 /* Process an incoming IP datagram fragment. */
-int ip_defrag(struct sk_buff *skb, u32 user)
+int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
-	int vif = vrf_master_ifindex_rcu(dev);
-	struct net *net = dev_net(dev);
+	int vif = l3mdev_master_ifindex_rcu(dev);
 	struct ipq *qp;
 
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
@@ -683,7 +682,7 @@
 }
 EXPORT_SYMBOL(ip_defrag);
 
-struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct iphdr iph;
 	int netoff;
@@ -712,7 +711,7 @@
 			if (pskb_trim_rcsum(skb, netoff + len))
 				return skb;
 			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-			if (ip_defrag(skb, user))
+			if (ip_defrag(net, skb, user))
 				return NULL;
 			skb_clear_hash(skb);
 		}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index bd0679d..6145214 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -498,10 +498,26 @@
 					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
 
+static struct rtable *gre_get_rt(struct sk_buff *skb,
+				 struct net_device *dev,
+				 struct flowi4 *fl,
+				 const struct ip_tunnel_key *key)
+{
+	struct net *net = dev_net(dev);
+
+	memset(fl, 0, sizeof(*fl));
+	fl->daddr = key->u.ipv4.dst;
+	fl->saddr = key->u.ipv4.src;
+	fl->flowi4_tos = RT_TOS(key->tos);
+	fl->flowi4_mark = skb->mark;
+	fl->flowi4_proto = IPPROTO_GRE;
+
+	return ip_route_output_key(net, fl);
+}
+
 static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel_info *tun_info;
-	struct net *net = dev_net(dev);
 	const struct ip_tunnel_key *key;
 	struct flowi4 fl;
 	struct rtable *rt;
@@ -516,14 +532,7 @@
 		goto err_free_skb;
 
 	key = &tun_info->key;
-	memset(&fl, 0, sizeof(fl));
-	fl.daddr = key->u.ipv4.dst;
-	fl.saddr = key->u.ipv4.src;
-	fl.flowi4_tos = RT_TOS(key->tos);
-	fl.flowi4_mark = skb->mark;
-	fl.flowi4_proto = IPPROTO_GRE;
-
-	rt = ip_route_output_key(net, &fl);
+	rt = gre_get_rt(skb, dev, &fl, key);
 	if (IS_ERR(rt))
 		goto err_free_skb;
 
@@ -566,6 +575,24 @@
 	dev->stats.tx_dropped++;
 }
 
+static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
+{
+	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+	struct rtable *rt;
+	struct flowi4 fl4;
+
+	if (ip_tunnel_info_af(info) != AF_INET)
+		return -EINVAL;
+
+	rt = gre_get_rt(skb, dev, &fl4, &info->key);
+	if (IS_ERR(rt))
+		return PTR_ERR(rt);
+
+	ip_rt_put(rt);
+	info->key.u.ipv4.src = fl4.saddr;
+	return 0;
+}
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 			      struct net_device *dev)
 {
@@ -1023,6 +1050,7 @@
 	.ndo_change_mtu		= ip_tunnel_change_mtu,
 	.ndo_get_stats64	= ip_tunnel_get_stats64,
 	.ndo_get_iflink		= ip_tunnel_get_iflink,
+	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
 };
 
 static void ipgre_tap_setup(struct net_device *dev)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 7cc9f7b..b1209b6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -157,6 +157,7 @@
 	u8 protocol = ip_hdr(skb)->protocol;
 	struct sock *last = NULL;
 	struct net_device *dev = skb->dev;
+	struct net *net = dev_net(dev);
 
 	for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
 		struct sock *sk = ra->sk;
@@ -167,9 +168,9 @@
 		if (sk && inet_sk(sk)->inet_num == protocol &&
 		    (!sk->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == dev->ifindex) &&
-		    net_eq(sock_net(sk), dev_net(dev))) {
+		    net_eq(sock_net(sk), net)) {
 			if (ip_is_fragment(ip_hdr(skb))) {
-				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
+				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
 					return true;
 			}
 			if (last) {
@@ -246,14 +247,15 @@
 	/*
 	 *	Reassemble IP fragments.
 	 */
+	struct net *net = dev_net(skb->dev);
 
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
+		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
 			return 0;
 	}
 
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
-		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+		       net, NULL, skb, skb->dev, NULL,
 		       ip_local_deliver_finish);
 }
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 06d2c87..50e2973 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -83,9 +83,10 @@
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
-static int ip_fragment(struct sock *sk, struct sk_buff *skb,
-		       unsigned int mtu,
-		       int (*output)(struct sock *, struct sk_buff *));
+static int
+ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+	    unsigned int mtu,
+	    int (*output)(struct net *, struct sock *, struct sk_buff *));
 
 /* Generate a checksum for an outgoing IP datagram. */
 void ip_send_check(struct iphdr *iph)
@@ -95,34 +96,28 @@
 }
 EXPORT_SYMBOL(ip_send_check);
 
-static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	struct iphdr *iph = ip_hdr(skb);
 
 	iph->tot_len = htons(skb->len);
 	ip_send_check(iph);
 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
-		       dst_output_okfn);
+		       dst_output);
 }
 
-int __ip_local_out(struct sk_buff *skb)
-{
-	return __ip_local_out_sk(skb->sk, skb);
-}
-
-int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 
-	err = __ip_local_out(skb);
+	err = __ip_local_out(net, sk, skb);
 	if (likely(err == 1))
-		err = dst_output(sk, skb);
+		err = dst_output(net, sk, skb);
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ip_local_out_sk);
+EXPORT_SYMBOL_GPL(ip_local_out);
 
 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
 {
@@ -142,6 +137,7 @@
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = skb_rtable(skb);
+	struct net *net = sock_net(sk);
 	struct iphdr *iph;
 
 	/* Build the IP header. */
@@ -160,7 +156,7 @@
 		iph->id = 0;
 	} else {
 		iph->frag_off = 0;
-		__ip_select_ident(sock_net(sk), iph, 1);
+		__ip_select_ident(net, iph, 1);
 	}
 
 	if (opt && opt->opt.optlen) {
@@ -172,16 +168,15 @@
 	skb->mark = sk->sk_mark;
 
 	/* Send it out. */
-	return ip_local_out(skb);
+	return ip_local_out(net, skb->sk, skb);
 }
 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 
-static int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct rtable *rt = (struct rtable *)dst;
 	struct net_device *dev = dst->dev;
-	struct net *net = dev_net(dev);
 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 	struct neighbour *neigh;
 	u32 nexthop;
@@ -225,8 +220,8 @@
 	return -EINVAL;
 }
 
-static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
-				unsigned int mtu)
+static int ip_finish_output_gso(struct net *net, struct sock *sk,
+				struct sk_buff *skb, unsigned int mtu)
 {
 	netdev_features_t features;
 	struct sk_buff *segs;
@@ -235,7 +230,7 @@
 	/* common case: locally created skb or seglen is <= mtu */
 	if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
 	      skb_gso_network_seglen(skb) <= mtu)
-		return ip_finish_output2(sk, skb);
+		return ip_finish_output2(net, sk, skb);
 
 	/* Slowpath -  GSO segment length is exceeding the dst MTU.
 	 *
@@ -258,7 +253,7 @@
 		int err;
 
 		segs->next = NULL;
-		err = ip_fragment(sk, segs, mtu, ip_finish_output2);
+		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
 
 		if (err && ret == 0)
 			ret = err;
@@ -276,24 +271,23 @@
 	/* Policy lookup after SNAT yielded a new policy */
 	if (skb_dst(skb)->xfrm) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 	mtu = ip_skb_dst_mtu(skb);
 	if (skb_is_gso(skb))
-		return ip_finish_output_gso(sk, skb, mtu);
+		return ip_finish_output_gso(net, sk, skb, mtu);
 
 	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
-		return ip_fragment(sk, skb, mtu, ip_finish_output2);
+		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 
-	return ip_finish_output2(sk, skb);
+	return ip_finish_output2(net, sk, skb);
 }
 
-int ip_mc_output(struct sock *sk, struct sk_buff *skb)
+int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
 	struct net_device *dev = rt->dst.dev;
-	struct net *net = dev_net(dev);
 
 	/*
 	 *	If the indicated interface is up and running, send the packet.
@@ -352,10 +346,9 @@
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_output(struct sock *sk, struct sk_buff *skb)
+int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
-	struct net *net = dev_net(dev);
 
 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
@@ -386,6 +379,7 @@
 int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	struct ip_options_rcu *inet_opt;
 	struct flowi4 *fl4;
 	struct rtable *rt;
@@ -416,7 +410,7 @@
 		 * keep trying until route appears or the connection times
 		 * itself out.
 		 */
-		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
+		rt = ip_route_output_ports(net, fl4, sk,
 					   daddr, inet->inet_saddr,
 					   inet->inet_dport,
 					   inet->inet_sport,
@@ -453,20 +447,20 @@
 		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
-	ip_select_ident_segs(sock_net(sk), skb, sk,
+	ip_select_ident_segs(net, skb, sk,
 			     skb_shinfo(skb)->gso_segs ?: 1);
 
 	/* TODO : should we use skb->sk here instead of sk ? */
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
 
-	res = ip_local_out(skb);
+	res = ip_local_out(net, sk, skb);
 	rcu_read_unlock();
 	return res;
 
 no_route:
 	rcu_read_unlock();
-	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
+	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	return -EHOSTUNREACH;
 }
@@ -495,20 +489,18 @@
 	skb_copy_secmark(to, from);
 }
 
-static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 		       unsigned int mtu,
-		       int (*output)(struct sock *, struct sk_buff *))
+		       int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph = ip_hdr(skb);
 
 	if ((iph->frag_off & htons(IP_DF)) == 0)
-		return ip_do_fragment(sk, skb, output);
+		return ip_do_fragment(net, sk, skb, output);
 
 	if (unlikely(!skb->ignore_df ||
 		     (IPCB(skb)->frag_max_size &&
 		      IPCB(skb)->frag_max_size > mtu))) {
-		struct net *net = dev_net(skb_rtable(skb)->dst.dev);
-
 		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 			  htonl(mtu));
@@ -516,7 +508,7 @@
 		return -EMSGSIZE;
 	}
 
-	return ip_do_fragment(sk, skb, output);
+	return ip_do_fragment(net, sk, skb, output);
 }
 
 /*
@@ -526,8 +518,8 @@
  *	single device frame, and queue such a frame for sending.
  */
 
-int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
-		   int (*output)(struct sock *, struct sk_buff *))
+int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		   int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct iphdr *iph;
 	int ptr;
@@ -537,11 +529,9 @@
 	int offset;
 	__be16 not_last_frag;
 	struct rtable *rt = skb_rtable(skb);
-	struct net *net;
 	int err = 0;
 
 	dev = rt->dst.dev;
-	net = dev_net(dev);
 
 	/*
 	 *	Point into the IP datagram header.
@@ -631,7 +621,7 @@
 				ip_send_check(iph);
 			}
 
-			err = output(sk, skb);
+			err = output(net, sk, skb);
 
 			if (!err)
 				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
@@ -771,7 +761,7 @@
 
 		ip_send_check(iph);
 
-		err = output(sk, skb2);
+		err = output(net, sk, skb2);
 		if (err)
 			goto fail;
 
@@ -1444,7 +1434,7 @@
 {
 	int err;
 
-	err = ip_local_out(skb);
+	err = ip_local_out(net, skb->sk, skb);
 	if (err) {
 		if (err > 0)
 			err = net_xmit_errno(err);
@@ -1571,7 +1561,7 @@
 	}
 
 	oif = arg->bound_dev_if;
-	if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
 		oif = skb->skb_iif;
 
 	flowi4_init_output(&fl4, oif,
@@ -1606,7 +1596,6 @@
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
-		skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
 		ip_push_pending_frames(sk, &fl4);
 	}
 out:
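
Throughout ip_output.c the fragmentation callbacks switch to the
(net, sk, skb) signature, so struct net is resolved once by the caller
instead of being re-derived from the dst at every hop. A compilable
sketch of the callback shape; all types and names here are dummies, not
the kernel's:

#include <stdio.h>

struct net { int id; };
struct sock { int unused; };
struct sk_buff { int len; };

/* the new callback shape: the network namespace is passed explicitly */
typedef int (*ip_output_fn)(struct net *, struct sock *, struct sk_buff *);

static int demo_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	(void)sk;
	printf("netns %d: sending %d bytes\n", net->id, skb->len);
	return 0;
}

/* ip_do_fragment()-style loop: resolve net once, reuse for every frag */
static int fragment_all(struct net *net, struct sock *sk,
			struct sk_buff *frags, int n, ip_output_fn output)
{
	int err = 0;

	for (int i = 0; i < n && !err; i++)
		err = output(net, sk, &frags[i]);
	return err;
}

int main(void)
{
	struct net net = { .id = 1 };
	struct sk_buff frags[3] = { { 1480 }, { 1480 }, { 520 } };

	return fragment_all(&net, NULL, frags, 3, demo_output);
}
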
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 84dce6a..6cb9009 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -53,6 +53,7 @@
 		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
 {
 	int pkt_len = skb->len - skb_inner_network_offset(skb);
+	struct net *net = dev_net(rt->dst.dev);
 	struct iphdr *iph;
 	int err;
 
@@ -76,10 +77,9 @@
 	iph->daddr	=	dst;
 	iph->saddr	=	src;
 	iph->ttl	=	ttl;
-	__ip_select_ident(dev_net(rt->dst.dev), iph,
-			  skb_shinfo(skb)->gso_segs ?: 1);
+	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 
-	err = ip_local_out_sk(sk, skb);
+	err = ip_local_out(net, sk, skb);
 	if (unlikely(net_xmit_eval(err)))
 		pkt_len = 0;
 	return pkt_len;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3b87ec5..4d8f0b6 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -197,7 +197,7 @@
 	skb_dst_set(skb, dst);
 	skb->dev = skb_dst(skb)->dev;
 
-	err = dst_output(skb->sk, skb);
+	err = dst_output(tunnel->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0)
 		err = skb->len;
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ed4ef09..0bc7412 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -146,6 +146,10 @@
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
+#if defined(CONFIG_IP_PNP_DHCP)
+static char dhcp_client_identifier[253] __initdata;
+#endif
+
 /* Persistent data: */
 
 static int ic_proto_used;			/* Protocol used, if any */
@@ -728,6 +732,16 @@
 			memcpy(e, vendor_class_identifier, len);
 			e += len;
 		}
+		len = strlen(dhcp_client_identifier + 1);
+		/* the minimum length of the identifier is 2, including the
+		 * 1-byte type, and it cannot be larger than the space left
+		 * in the options buffer
+		 */
+		if (len >= 1 && len < 312 - (e - options) - 1) {
+			*e++ = 61;
+			*e++ = len + 1;
+			memcpy(e, dhcp_client_identifier, len + 1);
+			e += len + 1;
+		}
 	}
 
 	*e++ = 255;	/* End of the list */
@@ -1557,8 +1571,24 @@
 		return 0;
 	}
 #ifdef CONFIG_IP_PNP_DHCP
-	else if (!strcmp(name, "dhcp")) {
+	else if (!strncmp(name, "dhcp", 4)) {
+		char *client_id;
+
 		ic_proto_enabled &= ~IC_RARP;
+		client_id = strstr(name, "dhcp,");
+		if (client_id) {
+			char *v;
+
+			client_id = client_id + 5;
+			v = strchr(client_id, ',');
+			if (!v)
+				return 1;
+			*v = 0;
+			if (kstrtou8(client_id, 0, dhcp_client_identifier))
+				DBG("DHCP: Invalid client identifier type\n");
+			strncpy(dhcp_client_identifier + 1, v + 1, 251);
+			*v = ',';
+		}
 		return 1;
 	}
 #endif
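
The parser above stores the client identifier as one type byte followed
by the value (the boot syntax the strstr/strchr parsing implies is
"dhcp,<type>,<value>"), and the options hunk emits it as DHCP option 61,
whose length byte covers type plus value. A userspace sketch of that
encoding; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* buf[0] holds the identifier type, buf[1..] the identifier itself,
 * mirroring dhcp_client_identifier[] above */
static size_t put_client_id(uint8_t *e, const char *id_buf)
{
	size_t len = strlen(id_buf + 1);	/* value without type byte */

	if (len < 1 || len > 253)		/* simplified bounds check */
		return 0;
	*e++ = 61;		/* DHCP option: client-identifier */
	*e++ = len + 1;		/* option length includes the type byte */
	memcpy(e, id_buf, len + 1);
	return len + 3;		/* tag + length + payload */
}

int main(void)
{
	/* type 0x00 + ASCII identifier, e.g. from ip=dhcp,0x0,myhost */
	char id[253] = { 0x00 };
	uint8_t opts[256];

	strcpy(id + 1, "myhost");
	printf("emitted %zu option bytes\n", put_client_id(opts, id));
	return 0;
}
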
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index cfcb996..fc42525 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1689,7 +1689,7 @@
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
 
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 /*
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 61eafc9..c3776ff 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,9 +17,8 @@
 #include <net/netfilter/nf_queue.h>
 
 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
-int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
+int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 	struct flowi4 fl4 = {};
@@ -104,7 +103,7 @@
 	}
 }
 
-static int nf_ip_reroute(struct sk_buff *skb,
+static int nf_ip_reroute(struct net *net, struct sk_buff *skb,
 			 const struct nf_queue_entry *entry)
 {
 	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -116,7 +115,7 @@
 		      skb->mark == rt_info->mark &&
 		      iph->daddr == rt_info->daddr &&
 		      iph->saddr == rt_info->saddr))
-			return ip_route_me_harder(skb, RTN_UNSPEC);
+			return ip_route_me_harder(net, skb, RTN_UNSPEC);
 	}
 	return 0;
 }
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 690d27d..a355841 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -75,6 +75,7 @@
 
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
+	depends on !NF_CONNTRACK || NF_CONNTRACK
 	help
 	  This option enables the nf_dup_ipv4 core, which duplicates an IPv4
 	  packet to be rerouted to another destination.
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 2dad3e1..11dccba 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -186,7 +186,7 @@
 	if (FWINV(ret != 0, ARPT_INV_VIA_IN)) {
 		dprintf("VIA in mismatch (%s vs %s).%s\n",
 			indev, arpinfo->iniface,
-			arpinfo->invflags&ARPT_INV_VIA_IN ?" (INV)":"");
+			arpinfo->invflags & ARPT_INV_VIA_IN ? " (INV)" : "");
 		return 0;
 	}
 
@@ -195,7 +195,7 @@
 	if (FWINV(ret != 0, ARPT_INV_VIA_OUT)) {
 		dprintf("VIA out mismatch (%s vs %s).%s\n",
 			outdev, arpinfo->outiface,
-			arpinfo->invflags&ARPT_INV_VIA_OUT ?" (INV)":"");
+			arpinfo->invflags & ARPT_INV_VIA_OUT ? " (INV)" : "");
 		return 0;
 	}
 
@@ -468,7 +468,7 @@
 				pos = newpos;
 			}
 		}
-		next:
+next:
 		duprintf("Finished chain %u\n", hook);
 	}
 	return 1;
@@ -632,7 +632,7 @@
  * newinfo).
  */
 static int translate_table(struct xt_table_info *newinfo, void *entry0,
-                           const struct arpt_replace *repl)
+			   const struct arpt_replace *repl)
 {
 	struct arpt_entry *iter;
 	unsigned int i;
@@ -892,7 +892,7 @@
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+		    const int *len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
@@ -1069,7 +1069,7 @@
 }
 
 static int do_replace(struct net *net, const void __user *user,
-                      unsigned int len)
+		      unsigned int len)
 {
 	int ret;
 	struct arpt_replace tmp;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 42d0946..b99affa 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -102,7 +102,7 @@
 	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
 		dprintf("VIA in mismatch (%s vs %s).%s\n",
 			indev, ipinfo->iniface,
-			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
+			ipinfo->invflags & IPT_INV_VIA_IN ? " (INV)" : "");
 		return false;
 	}
 
@@ -111,7 +111,7 @@
 	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
 		dprintf("VIA out mismatch (%s vs %s).%s\n",
 			outdev, ipinfo->outiface,
-			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
+			ipinfo->invflags & IPT_INV_VIA_OUT ? " (INV)" : "");
 		return false;
 	}
 
@@ -120,7 +120,7 @@
 	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
 		dprintf("Packet protocol %hi does not match %hi.%s\n",
 			ip->protocol, ipinfo->proto,
-			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
+			ipinfo->invflags & IPT_INV_PROTO ? " (INV)" : "");
 		return false;
 	}
 
@@ -431,8 +431,8 @@
 	} while (!acpar.hotdrop);
 	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
 
- 	xt_write_recseq_end(addend);
- 	local_bh_enable();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
@@ -484,7 +484,7 @@
 				unsigned int oldpos, size;
 
 				if ((strcmp(t->target.u.user.name,
-			    		    XT_STANDARD_TARGET) == 0) &&
+					    XT_STANDARD_TARGET) == 0) &&
 				    t->verdict < -NF_MAX_VERDICT - 1) {
 					duprintf("mark_source_chains: bad "
 						"negative verdict (%i)\n",
@@ -549,7 +549,7 @@
 				pos = newpos;
 			}
 		}
-		next:
+next:
 		duprintf("Finished chain %u\n", hook);
 	}
 	return 1;
@@ -804,7 +804,7 @@
    newinfo) */
 static int
 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
-                const struct ipt_replace *repl)
+		const struct ipt_replace *repl)
 {
 	struct ipt_entry *iter;
 	unsigned int i;
@@ -1078,7 +1078,7 @@
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+		    const int *len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
@@ -1304,7 +1304,7 @@
 
 static int
 do_add_counters(struct net *net, const void __user *user,
-                unsigned int len, int compat)
+		unsigned int len, int compat)
 {
 	unsigned int i;
 	struct xt_counters_info tmp;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3f32c03..4a9e6db 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -492,14 +492,14 @@
 {
 #define HBUFFERLEN 30
 	char hbuffer[HBUFFERLEN];
-	int j,k;
+	int j, k;
 
-	for (k=0, j=0; k < HBUFFERLEN-3 && j < ETH_ALEN; j++) {
+	for (k = 0, j = 0; k < HBUFFERLEN - 3 && j < ETH_ALEN; j++) {
 		hbuffer[k++] = hex_asc_hi(payload->src_hw[j]);
 		hbuffer[k++] = hex_asc_lo(payload->src_hw[j]);
-		hbuffer[k++]=':';
+		hbuffer[k++] = ':';
 	}
-	hbuffer[--k]='\0';
+	hbuffer[--k] = '\0';
 
 	pr_debug("src %pI4@%s, dst %pI4\n",
 		 &payload->src_ip, hbuffer, &payload->dst_ip);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 87907d4..1d16c0f 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -59,7 +59,7 @@
 		nf_send_unreach(skb, ICMP_PKT_FILTERED, hook);
 		break;
 	case IPT_TCP_RESET:
-		nf_send_reset(skb, hook);
+		nf_send_reset(par->net, skb, hook);
 	case IPT_ICMP_ECHOREPLY:
 		/* Doesn't happen. */
 		break;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index d7021f2..5fdc556 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -39,11 +39,14 @@
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+		  const struct sk_buff *skb, struct sk_buff *nskb,
 		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
 		  struct iphdr *niph, struct tcphdr *nth,
 		  unsigned int tcp_hdr_size)
 {
+	struct net *net = nf_ct_net(snet->tmpl);
+
 	nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
 	nskb->ip_summed   = CHECKSUM_PARTIAL;
 	nskb->csum_start  = (unsigned char *)nth - nskb->head;
@@ -51,7 +54,7 @@
 
 	skb_dst_set_noref(nskb, skb_dst(skb));
 	nskb->protocol = htons(ETH_P_IP);
-	if (ip_route_me_harder(nskb, RTN_UNSPEC))
+	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 		goto free_nskb;
 
 	if (nfct) {
@@ -60,7 +63,7 @@
 		nf_conntrack_get(nfct);
 	}
 
-	ip_local_out(nskb);
+	ip_local_out(net, nskb->sk, nskb);
 	return;
 
 free_nskb:
@@ -68,7 +71,8 @@
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+			    const struct sk_buff *skb, const struct tcphdr *th,
 			    const struct synproxy_options *opts)
 {
 	struct sk_buff *nskb;
@@ -104,7 +108,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -148,7 +152,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+	synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
 			  niph, nth, tcp_hdr_size);
 }
 
@@ -188,7 +192,7 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -226,8 +230,8 @@
 
 	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-	                  niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+			  niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -287,7 +291,7 @@
 					  XT_SYNPROXY_OPT_SACK_PERM |
 					  XT_SYNPROXY_OPT_ECN);
 
-		synproxy_send_client_synack(skb, th, &opts);
+		synproxy_send_client_synack(snet, skb, th, &opts);
 		return NF_DROP;
 
 	} else if (th->ack && !(th->fin || th->rst || th->syn)) {
@@ -433,14 +437,12 @@
 static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = {
 	{
 		.hook		= ipv4_synproxy_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM - 1,
 	},
 	{
 		.hook		= ipv4_synproxy_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM - 1,
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 14a2aa8..a787d07 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -25,7 +25,7 @@
 	bool r;
 	pr_debug("spi_match:%c 0x%x <= 0x%x <= 0x%x\n",
 		 invert ? '!' : ' ', min, spi, max);
-	r=(spi >= min && spi <= max) ^ invert;
+	r = (spi >= min && spi <= max) ^ invert;
 	pr_debug(" result %s\n", r ? "PASS" : "FAILED");
 	return r;
 }
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 74dd667..78cc64e 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -60,9 +60,7 @@
 	if (FIB_RES_DEV(res) == dev)
 		dev_match = true;
 #endif
-	if (dev_match || flags & XT_RPFILTER_LOOSE)
-		return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
-	return dev_match;
+	return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
 static bool rpfilter_is_local(const struct sk_buff *skb)
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 2d6fc91..ba5d392 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -67,7 +67,7 @@
 		    iph->daddr != daddr ||
 		    skb->mark != mark ||
 		    iph->tos != tos) {
-			err = ip_route_me_harder(skb, RTN_UNSPEC);
+			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 3a2e4d8..ae2cd27 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -68,7 +68,6 @@
 	/* Before packet filtering, change destination */
 	{
 		.hook		= iptable_nat_ipv4_in,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP_PRI_NAT_DST,
@@ -76,7 +75,6 @@
 	/* After packet filtering, change source */
 	{
 		.hook		= iptable_nat_ipv4_out,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_NAT_SRC,
@@ -84,7 +82,6 @@
 	/* Before packet filtering, change destination */
 	{
 		.hook		= iptable_nat_ipv4_local_fn,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_NAT_DST,
@@ -92,7 +89,6 @@
 	/* After packet filtering, change source */
 	{
 		.hook		= iptable_nat_ipv4_fn,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_NAT_SRC,
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index f534e2f..c2e23d5 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -79,7 +79,7 @@
 	int ret;
 
 	ret = register_pernet_subsys(&iptable_security_net_ops);
-        if (ret < 0)
+	if (ret < 0)
 		return ret;
 
 	sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 752fb40..461ca926 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -166,42 +166,36 @@
 static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
 	{
 		.hook		= ipv4_conntrack_in,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK,
 	},
 	{
 		.hook		= ipv4_conntrack_local,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_CONNTRACK,
 	},
 	{
 		.hook		= ipv4_helper,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK_HELPER,
 	},
 	{
 		.hook		= ipv4_confirm,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
 	},
 	{
 		.hook		= ipv4_helper,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_CONNTRACK_HELPER,
 	},
 	{
 		.hook		= ipv4_confirm,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index b246346..0e5591c 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -22,14 +22,15 @@
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
 
-static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
+				   u_int32_t user)
 {
 	int err;
 
 	skb_orphan(skb);
 
 	local_bh_disable();
-	err = ip_defrag(skb, user);
+	err = ip_defrag(net, skb, user);
 	local_bh_enable();
 
 	if (!err) {
@@ -85,7 +86,7 @@
 		enum ip_defrag_users user =
 			nf_ct_defrag_user(state->hook, skb);
 
-		if (nf_ct_ipv4_gather_frags(skb, user))
+		if (nf_ct_ipv4_gather_frags(state->net, skb, user))
 			return NF_STOLEN;
 	}
 	return NF_ACCEPT;
@@ -94,14 +95,12 @@
 static struct nf_hook_ops ipv4_defrag_ops[] = {
 	{
 		.hook		= ipv4_conntrack_defrag,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK_DEFRAG,
 	},
 	{
 		.hook           = ipv4_conntrack_defrag,
-		.owner          = THIS_MODULE,
 		.pf             = NFPROTO_IPV4,
 		.hooknum        = NF_INET_LOCAL_OUT,
 		.priority       = NF_IP_PRI_CONNTRACK_DEFRAG,
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
index ce2a59e..ceb1873 100644
--- a/net/ipv4/netfilter/nf_dup_ipv4.c
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -92,7 +92,7 @@
 
 	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
 		__this_cpu_write(nf_skb_duplicated, true);
-		ip_local_out(skb);
+		ip_local_out(net, skb->sk, skb);
 		__this_cpu_write(nf_skb_duplicated, false);
 	} else {
 		kfree_skb(skb);
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index bc3b9dc..5075b7e 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -431,7 +431,7 @@
 
 		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
 		    ct->tuplehash[!dir].tuple.src.u3.ip) {
-			err = ip_route_me_harder(skb, RTN_UNSPEC);
+			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 7c67667..ddb894a 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1156,7 +1156,7 @@
 		}
 
 		if (obj->type == SNMP_IPADDR)
-			mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
+			mangle_address(ctx.begin, ctx.pointer - 4, map, check);
 
 		kfree(obj->id);
 		kfree(obj);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 3262e41..c747b2d 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -99,7 +99,7 @@
 EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
 
 /* Send RST reply */
-void nf_send_reset(struct sk_buff *oldskb, int hook)
+void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
 	struct sk_buff *nskb;
 	const struct iphdr *oiph;
@@ -129,7 +129,7 @@
 				   ip4_dst_hoplimit(skb_dst(nskb)));
 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
 
-	if (ip_route_me_harder(nskb, RTN_UNSPEC))
+	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 		goto free_nskb;
 
 	/* "Never happens" */
@@ -157,7 +157,7 @@
 		dev_queue_xmit(nskb);
 	} else
 #endif
-		ip_local_out(nskb);
+		ip_local_out(net, nskb->sk, nskb);
 
 	return;
 
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 9f486b3..2375b0a 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -53,7 +53,7 @@
 		    iph->daddr != daddr ||
 		    skb->mark != mark ||
 		    iph->tos != tos)
-			if (ip_route_me_harder(skb, RTN_UNSPEC))
+			if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
 				ret = NF_DROP;
 	}
 	return ret;
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index c1582e0..c24f41c 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -30,7 +30,7 @@
 		nf_send_unreach(pkt->skb, priv->icmp_code, pkt->hook);
 		break;
 	case NFT_REJECT_TCP_RST:
-		nf_send_reset(pkt->skb, pkt->hook);
+		nf_send_reset(pkt->net, pkt->skb, pkt->hook);
 		break;
 	default:
 		break;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 28ef8a9..8c0d0bd 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -413,7 +413,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, rt->dst.dev,
-		      dst_output_okfn);
+		      dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -484,6 +484,7 @@
 static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 	struct ipcm_cookie ipc;
 	struct rtable *rt = NULL;
 	struct flowi4 fl4;
@@ -543,7 +544,7 @@
 	ipc.oif = sk->sk_bound_dev_if;
 
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+		err = ip_cmsg_send(net, msg, &ipc, false);
 		if (err)
 			goto out;
 		if (ipc.opt)
@@ -598,6 +599,9 @@
 			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
 			   daddr, saddr, 0, 0);
 
+	if (!saddr && ipc.oif)
+		l3mdev_get_saddr(net, ipc.oif, &fl4);
+
 	if (!inet->hdrincl) {
 		rfv.msg = msg;
 		rfv.hlen = 0;
@@ -608,7 +612,7 @@
 	}
 
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
-	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+	rt = ip_route_output_flow(net, &fl4, sk);
 	if (IS_ERR(rt)) {
 		err = PTR_ERR(rt);
 		rt = NULL;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6bab845..85f184e 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,7 @@
 #endif
 #include <net/secure_seq.h>
 #include <net/ip_tunnels.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 #define RT_FL_TOS(oldflp4) \
 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -847,7 +847,7 @@
 		return;
 	}
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
-	vif = vrf_master_ifindex_rcu(rt->dst.dev);
+	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
 	rcu_read_unlock();
 
 	net = dev_net(rt->dst.dev);
@@ -941,7 +941,7 @@
 	}
 
 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
-			       vrf_master_ifindex(skb->dev), 1);
+			       l3mdev_master_ifindex(skb->dev), 1);
 
 	send = true;
 	if (peer) {
@@ -1152,7 +1152,7 @@
 		dst_set_expires(&rt->dst, 0);
 }
 
-static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
+static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	pr_debug("%s: %pI4 -> %pI4, %s\n",
 		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
@@ -1487,9 +1487,8 @@
 	    skb->protocol != htons(ETH_P_IP))
 		goto e_inval;
 
-	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
-		if (ipv4_is_loopback(saddr))
-			goto e_inval;
+	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
+		goto e_inval;
 
 	if (ipv4_is_zeronet(saddr)) {
 		if (!ipv4_is_local_multicast(daddr))
@@ -1652,6 +1651,48 @@
 	return err;
 }
 
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+
+/* To make ICMP packets follow the right flow, the multipath hash is
+ * calculated from the inner IP addresses in reverse order.
+ */
+static int ip_multipath_icmp_hash(struct sk_buff *skb)
+{
+	const struct iphdr *outer_iph = ip_hdr(skb);
+	struct icmphdr _icmph;
+	const struct icmphdr *icmph;
+	struct iphdr _inner_iph;
+	const struct iphdr *inner_iph;
+
+	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
+		goto standard_hash;
+
+	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
+				   &_icmph);
+	if (!icmph)
+		goto standard_hash;
+
+	if (icmph->type != ICMP_DEST_UNREACH &&
+	    icmph->type != ICMP_REDIRECT &&
+	    icmph->type != ICMP_TIME_EXCEEDED &&
+	    icmph->type != ICMP_PARAMETERPROB) {
+		goto standard_hash;
+	}
+
+	inner_iph = skb_header_pointer(skb,
+				       outer_iph->ihl * 4 + sizeof(_icmph),
+				       sizeof(_inner_iph), &_inner_iph);
+	if (!inner_iph)
+		goto standard_hash;
+
+	return fib_multipath_hash(inner_iph->daddr, inner_iph->saddr);
+
+standard_hash:
+	return fib_multipath_hash(outer_iph->saddr, outer_iph->daddr);
+}
+
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
 static int ip_mkroute_input(struct sk_buff *skb,
 			    struct fib_result *res,
 			    const struct flowi4 *fl4,
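
The ip_multipath_icmp_hash() helper added above exists so that ICMP errors take the same ECMP nexthop as the flow they describe: an ICMP error quotes the IP header of the offending packet, and hashing that quoted (inner) header with the addresses swapped yields the same value that ordinary packets travelling back toward the original sender hash to. A minimal standalone sketch of that property; flow_hash() is a made-up stand-in for the kernel's fib_multipath_hash(), not kernel code:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for fib_multipath_hash(saddr, daddr) */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr)
{
	return (saddr * 2654435761u) ^ (daddr * 2246822519u);
}

int main(void)
{
	uint32_t a = 0x0a000001;	/* original sender */
	uint32_t b = 0x0a000002;	/* original destination */

	/* ordinary b -> a packets of this flow hash as (saddr=b, daddr=a) */
	uint32_t reply_hash = flow_hash(b, a);

	/* an ICMP error about an a -> b packet quotes the original header,
	 * so inner saddr = a and inner daddr = b; hashing the inner
	 * addresses in reverse order reproduces the reply-direction hash
	 */
	uint32_t inner_saddr = a, inner_daddr = b;
	uint32_t icmp_hash = flow_hash(inner_daddr, inner_saddr);

	printf("%s\n", reply_hash == icmp_hash ? "same nexthop" : "different");
	return 0;
}
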
@@ -1659,8 +1700,15 @@
 			    __be32 daddr, __be32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res->fi && res->fi->fib_nhs > 1)
-		fib_select_multipath(res);
+	if (res->fi && res->fi->fib_nhs > 1) {
+		int h;
+
+		if (unlikely(ip_hdr(skb)->protocol == IPPROTO_ICMP))
+			h = ip_multipath_icmp_hash(skb);
+		else
+			h = fib_multipath_hash(saddr, daddr);
+		fib_select_multipath(res, h);
+	}
 #endif
 
 	/* create a routing cache entry */
@@ -1740,10 +1788,11 @@
 	 *	Now we are ready to route packet.
 	 */
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
+	fl4.flowi4_iif = l3mdev_fib_oif_rcu(dev);
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = 0;
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res, 0);
@@ -1760,7 +1809,7 @@
 		err = fib_validate_source(skb, saddr, daddr, tos,
 					  0, dev, in_dev, &itag);
 		if (err < 0)
-			goto martian_source_keep_err;
+			goto martian_source;
 		goto local_input;
 	}
 
@@ -1782,7 +1831,7 @@
 		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
 					  in_dev, &itag);
 		if (err < 0)
-			goto martian_source_keep_err;
+			goto martian_source;
 	}
 	flags |= RTCF_BROADCAST;
 	res.type = RTN_BROADCAST;
@@ -1858,8 +1907,6 @@
 	goto out;
 
 martian_source:
-	err = -EINVAL;
-martian_source_keep_err:
 	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
 	goto out;
 }
@@ -2028,7 +2075,8 @@
  * Major route resolver routine.
  */
 
-struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
+struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+					  int mp_hash)
 {
 	struct net_device *dev_out = NULL;
 	__u8 tos = RT_FL_TOS(fl4);
@@ -2127,11 +2175,10 @@
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_HOST);
 		}
-		if (netif_is_vrf(dev_out) &&
-		    !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
-			rth = vrf_dev_get_rth(dev_out);
+
+		rth = l3mdev_get_rtable(dev_out, fl4);
+		if (rth)
 			goto out;
-		}
 	}
 
 	if (!fl4->daddr) {
@@ -2149,7 +2196,8 @@
 	if (err) {
 		res.fi = NULL;
 		res.table = NULL;
-		if (fl4->flowi4_oif) {
+		if (fl4->flowi4_oif &&
+		    !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
 			/* Apparently, routing tables are wrong. Assume
 			   that the destination is on link.
 
@@ -2191,18 +2239,7 @@
 		goto make_route;
 	}
 
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
-		fib_select_multipath(&res);
-	else
-#endif
-	if (!res.prefixlen &&
-	    res.table->tb_num_default > 1 &&
-	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
-		fib_select_default(fl4, &res);
-
-	if (!fl4->saddr)
-		fl4->saddr = FIB_RES_PREFSRC(net, res);
+	fib_select_path(net, &res, fl4, mp_hash);
 
 	dev_out = FIB_RES_DEV(res);
 	fl4->flowi4_oif = dev_out->ifindex;
@@ -2215,7 +2252,7 @@
 	rcu_read_unlock();
 	return rth;
 }
-EXPORT_SYMBOL_GPL(__ip_route_output_key);
+EXPORT_SYMBOL_GPL(__ip_route_output_key_hash);
 
 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
 {
@@ -2267,7 +2304,7 @@
 
 		new->__use = 1;
 		new->input = dst_discard;
-		new->output = dst_discard_sk;
+		new->output = dst_discard_out;
 
 		new->dev = ort->dst.dev;
 		if (new->dev)
@@ -2471,6 +2508,9 @@
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
 
+	if (netif_index_is_l3_master(net, fl4.flowi4_oif))
+		fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
+
 	if (iif) {
 		struct net_device *dev;
 
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 6595aff..4cbe9f0 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -192,15 +192,11 @@
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
-__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
-			      __u16 *mssp)
+__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v4_init_sequence(iph, th, mssp);
 }
 
@@ -225,10 +221,13 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sock *child;
+	bool own_req;
 
-	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
+	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
+						 NULL, &own_req);
 	if (child) {
 		atomic_set(&req->rsk_refcnt, 1);
+		sock_rps_save_rxhash(child, skb);
 		inet_csk_reqsk_queue_add(sk, req, child);
 	} else {
 		reqsk_free(req);
@@ -288,6 +287,10 @@
 }
 EXPORT_SYMBOL(cookie_ecn_ok);
 
+/* On input, sk is a listener.
+ * Output is the listener if the incoming packet would not create a child,
+ * or NULL if memory could not be allocated.
+ */
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 {
 	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
@@ -326,7 +329,7 @@
 		goto out;
 
 	ret = NULL;
-	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk); /* for safety */
+	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */
 	if (!req)
 		goto out;
 
@@ -381,10 +384,10 @@
 	}
 
 	/* Try to redo what tcp_v4_send_synack did. */
-	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
+	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
 
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
-				  &req->rcv_wnd, &req->window_clamp,
+				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(&rt->dst, RTAX_INITRWND));
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 894da3a..25300c5 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -496,6 +496,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "tcp_recovery",
+		.data		= &sysctl_tcp_recovery,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "tcp_reordering",
 		.data		= &sysctl_tcp_reordering,
 		.maxlen		= sizeof(int),
@@ -577,6 +584,13 @@
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "tcp_min_rtt_wlen",
+		.data		= &sysctl_tcp_min_rtt_wlen,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "tcp_low_latency",
 		.data		= &sysctl_tcp_low_latency,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b8b8fa1..0cfa7c0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -388,6 +388,7 @@
 
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+	tp->rtt_min[0].rtt = ~0U;
 
 	/* So many TCP implementations out there (incorrectly) count the
 	 * initial SYN frame in their delayed-ACK and congestion control
@@ -900,7 +901,8 @@
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto out_err;
 	}
 
@@ -967,7 +969,8 @@
 
 		copied += copy;
 		offset += copy;
-		if (!(size -= copy)) {
+		size -= copy;
+		if (!size) {
 			tcp_tx_timestamp(sk, skb);
 			goto out;
 		}
@@ -988,7 +991,8 @@
 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
 			 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1111,7 +1115,8 @@
 	 */
 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 	    !tcp_passive_fastopen(sk)) {
-		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 	}
 
@@ -1267,7 +1272,8 @@
 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
 				 TCP_NAGLE_PUSH, size_goal);
 
-		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+		err = sk_stream_wait_memory(sk, &timeo);
+		if (err != 0)
 			goto do_error;
 
 		mss_now = tcp_send_mss(sk, &size_goal, flags);
@@ -1767,7 +1773,8 @@
 
 			/* __ Restore normal policy in scheduler __ */
 
-			if ((chunk = len - tp->ucopy.len) != 0) {
+			chunk = len - tp->ucopy.len;
+			if (chunk != 0) {
 				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 				len -= chunk;
 				copied += chunk;
@@ -1778,7 +1785,8 @@
 do_prequeue:
 				tcp_prequeue_process(sk);
 
-				if ((chunk = len - tp->ucopy.len) != 0) {
+				chunk = len - tp->ucopy.len;
+				if (chunk != 0) {
 					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 					len -= chunk;
 					copied += chunk;
@@ -2230,7 +2238,8 @@
 	sk->sk_shutdown = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->srtt_us = 0;
-	if ((tp->write_seq += tp->max_window + 2) == 0)
+	tp->write_seq += tp->max_window + 2;
+	if (tp->write_seq == 0)
 		tp->write_seq = 1;
 	icsk->icsk_backoff = 0;
 	tp->snd_cwnd = 2;
@@ -2253,13 +2262,6 @@
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-void tcp_sock_destruct(struct sock *sk)
-{
-	inet_sock_destruct(sk);
-
-	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
-}
-
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
@@ -2581,7 +2583,7 @@
 		    TCPF_LISTEN))) {
 			tcp_fastopen_init_key_once(true);
 
-			err = fastopen_init_queue(sk, val);
+			fastopen_queue_tune(sk, val);
 		} else {
 			err = -EINVAL;
 		}
@@ -2849,10 +2851,7 @@
 		break;
 
 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq)
-			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
-		else
-			val = 0;
+		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
 		break;
 
 	case TCP_TIMESTAMP:
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 7092a61..7e538f7 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -209,7 +209,7 @@
 
 		/* alpha = (1 - g) * alpha + g * F */
 
-		alpha -= alpha >> dctcp_shift_g;
+		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
 		if (bytes_ecn) {
 			/* If dctcp_shift_g == 1, a 32bit value would overflow
 			 * after 8 Mbytes.
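
A side note on the one-line tcp_dctcp.c change above: with the old update, alpha >> dctcp_shift_g truncates to a zero decrement once alpha drops below 2^dctcp_shift_g, so alpha could stay pinned at a small nonzero value through mark-free periods; min_not_zero() guarantees a nonzero step so alpha can decay all the way to zero. A userspace sketch of the difference, with min_not_zero() reimplemented here for illustration:

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's min_not_zero() */
static uint32_t min_not_zero(uint32_t a, uint32_t b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	const unsigned int shift_g = 4;	/* the default dctcp_shift_g */
	uint32_t old_alpha = 10, new_alpha = 10;
	int i;

	/* ten windows with no ECN marks (bytes_ecn == 0) */
	for (i = 0; i < 10; i++) {
		old_alpha -= old_alpha >> shift_g;	/* 10 >> 4 == 0: stuck */
		new_alpha -= min_not_zero(new_alpha, new_alpha >> shift_g);
	}
	printf("old: %u  new: %u\n", old_alpha, new_alpha);	/* old: 10  new: 0 */
	return 0;
}
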
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index db43c62..55be6ac 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -133,18 +133,20 @@
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	struct sock *child;
 	u32 end_seq;
+	bool own_req;
 
 	req->num_retrans = 0;
 	req->num_timeout = 0;
 	req->sk = NULL;
 
-	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+							 NULL, &own_req);
 	if (!child)
 		return NULL;
 
-	spin_lock(&queue->fastopenq->lock);
-	queue->fastopenq->qlen++;
-	spin_unlock(&queue->fastopenq->lock);
+	spin_lock(&queue->fastopenq.lock);
+	queue->fastopenq.qlen++;
+	spin_unlock(&queue->fastopenq.lock);
 
 	/* Initialize the child socket. Have to fix some values to take
 	 * into account the child is a Fast Open socket and is created
@@ -161,15 +163,13 @@
 	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
 
 	/* Activate the retrans timer so that SYNACK can be retransmitted.
-	 * The request socket is not added to the SYN table of the parent
+	 * The request socket is not added to the ehash
 	 * because it's been added to the accept queue directly.
 	 */
 	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
 				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
 
-	atomic_set(&req->rsk_refcnt, 1);
-	/* Add the child socket directly into the accept queue */
-	inet_csk_reqsk_queue_add(sk, req, child);
+	atomic_set(&req->rsk_refcnt, 2);
 
 	/* Now finish processing the fastopen child socket. */
 	inet_csk(child)->icsk_af_ops->rebuild_header(child);
@@ -178,12 +178,10 @@
 	tcp_init_metrics(child);
 	tcp_init_buffer_space(child);
 
-	/* Queue the data carried in the SYN packet. We need to first
-	 * bump skb's refcnt because the caller will attempt to free it.
-	 * Note that IPv6 might also have used skb_get() trick
-	 * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
-	 * So we need to eventually get a clone of the packet,
-	 * before inserting it in sk_receive_queue.
+	/* Queue the data carried in the SYN packet.
+	 * We used to play tricky games with skb_get().
+	 * With lockless listener, it is a dead end.
+	 * Do not think about it.
 	 *
 	 * XXX (TFO) - we honor a zero-payload TFO request for now,
 	 * (any reason not to?) but no need to queue the skb since
@@ -191,12 +189,7 @@
 	 */
 	end_seq = TCP_SKB_CB(skb)->end_seq;
 	if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
-		struct sk_buff *skb2;
-
-		if (unlikely(skb_shared(skb)))
-			skb2 = skb_clone(skb, GFP_ATOMIC);
-		else
-			skb2 = skb_get(skb);
+		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
 		if (likely(skb2)) {
 			skb_dst_drop(skb2);
@@ -214,12 +207,9 @@
 		}
 	}
 	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
-	sk->sk_data_ready(sk);
-	bh_unlock_sock(child);
-	/* Note: sock_put(child) will be done by tcp_conn_request()
-	 * after SYNACK packet is sent.
+	/* tcp_conn_request() sends the SYNACK,
+	 * and queues the child into the listener's accept queue.
 	 */
-	WARN_ON(!req->sk);
 	return child;
 }
 
@@ -237,8 +227,8 @@
 	 * between qlen overflow causing Fast Open to be disabled
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
-	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (!fastopenq || fastopenq->max_qlen == 0)
+	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fastopenq->max_qlen == 0)
 		return false;
 
 	if (fastopenq->qlen >= fastopenq->max_qlen) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4964d53..fdd88c3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -95,6 +95,7 @@
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
+int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -880,6 +881,7 @@
 
 	if (metric > 0)
 		tcp_disable_early_retrans(tp);
+	tp->rack.reord = 1;
 }
 
 /* This must be called before lost_out is incremented */
@@ -905,8 +907,7 @@
 	}
 }
 
-static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
-					    struct sk_buff *skb)
+void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
 {
 	tcp_verify_retransmit_hint(tp, skb);
 
@@ -1047,70 +1048,6 @@
 	return !before(start_seq, end_seq - tp->max_window);
 }
 
-/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "B". Later note: FACK people cheated me again 8), we have to account
- * for reordering! Ugly, but should help.
- *
- * Search retransmitted skbs from write_queue that were sent when snd_nxt was
- * less than what is now known to be received by the other end (derived from
- * highest SACK block). Also calculate the lowest snd_nxt among the remaining
- * retransmitted skbs to avoid some costly processing per ACKs.
- */
-static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
-{
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
-	int cnt = 0;
-	u32 new_low_seq = tp->snd_nxt;
-	u32 received_upto = tcp_highest_sack_seq(tp);
-
-	if (!tcp_is_fack(tp) || !tp->retrans_out ||
-	    !after(received_upto, tp->lost_retrans_low) ||
-	    icsk->icsk_ca_state != TCP_CA_Recovery)
-		return;
-
-	tcp_for_write_queue(skb, sk) {
-		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
-
-		if (skb == tcp_send_head(sk))
-			break;
-		if (cnt == tp->retrans_out)
-			break;
-		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-			continue;
-
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
-			continue;
-
-		/* TODO: We would like to get rid of tcp_is_fack(tp) only
-		 * constraint here (see above) but figuring out that at
-		 * least tp->reordering SACK blocks reside between ack_seq
-		 * and received_upto is not easy task to do cheaply with
-		 * the available datastructures.
-		 *
-		 * Whether FACK should check here for tp->reordering segs
-		 * in-between one could argue for either way (it would be
-		 * rather simple to implement as we could count fack_count
-		 * during the walk and do tp->fackets_out - fack_count).
-		 */
-		if (after(received_upto, ack_seq)) {
-			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-			tp->retrans_out -= tcp_skb_pcount(skb);
-			*flag |= FLAG_LOST_RETRANS;
-			tcp_skb_mark_lost_uncond_verify(tp, skb);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
-		} else {
-			if (before(ack_seq, new_low_seq))
-				new_low_seq = ack_seq;
-			cnt += tcp_skb_pcount(skb);
-		}
-	}
-
-	if (tp->retrans_out)
-		tp->lost_retrans_low = new_low_seq;
-}
-
 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			    struct tcp_sack_block_wire *sp, int num_sacks,
 			    u32 prior_snd_una)
@@ -1236,6 +1173,8 @@
 		return sacked;
 
 	if (!(sacked & TCPCB_SACKED_ACKED)) {
+		tcp_rack_advance(tp, xmit_time, sacked);
+
 		if (sacked & TCPCB_SACKED_RETRANS) {
 			/* If the segment is not tagged as lost,
 			 * we do not clear RETRANS, believing
@@ -1837,7 +1776,6 @@
 	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
 		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-	tcp_mark_lost_retrans(sk, &state->flag);
 	tcp_verify_left_out(tp);
 out:
 
@@ -2314,14 +2252,29 @@
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
+{
+	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+	       before(tp->rx_opt.rcv_tsecr, when);
+}
+
+/* skb is a spurious retransmission if the returned timestamp echo
+ * reply is prior to the skb transmission time
+ */
+static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
+				     const struct sk_buff *skb)
+{
+	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
+	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
+}
+
 /* Nothing was retransmitted or returned timestamp is less
  * than timestamp of the first retransmission.
  */
 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
 {
 	return !tp->retrans_stamp ||
-		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
-		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
+	       tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
 }
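
The tcp_tsopt_ecr_before()/tcp_skb_spurious_retrans() pair above uses TCP timestamps to resolve the ambiguity behind Karn's rule: if the echoed timestamp predates the skb's (re)transmission time, the ACK must have been triggered by an earlier copy, so the retransmission time must not be fed into RACK's most-recently-delivered tracking. A toy userspace illustration with made-up timestamp values, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "a is before b", mirroring the kernel's before() */
static int ts_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t retrans_time = 2000;	/* skb retransmitted at ts 2000 */
	uint32_t echoed_ts = 1500;	/* receiver echoed ts 1500 */

	if (ts_before(echoed_ts, retrans_time))
		printf("retransmission was spurious\n");
	else
		printf("ACK covers the retransmission\n");
	return 0;
}
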
 
 /* Undo procedures. */
@@ -2853,6 +2806,11 @@
 		}
 	}
 
+	/* Use RACK to detect loss */
+	if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
+	    tcp_rack_mark_lost(sk))
+		flag |= FLAG_LOST_RETRANS;
+
 	/* E. Process state. */
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
@@ -2915,8 +2873,69 @@
 	tcp_xmit_retransmit_queue(sk);
 }
 
+/* Kathleen Nichols' algorithm for tracking the minimum value of
+ * a data stream over some fixed time interval. (E.g., the minimum
+ * RTT over the past five minutes.) It uses constant space and constant
+ * time per update yet almost always delivers the same minimum as an
+ * implementation that has to keep all the data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of the
+ * n'th best >= n-1'th best. It also makes sure that the three values
+ * are widely separated in the time window since that bounds the worst
+ * case error when that data is monotonically increasing over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because it
+ * has no value - the new min is <= everything else in the window by
+ * definition and it's the most recent. So we restart fresh on every new min
+ * and overwrite the 2nd & 3rd choices. The same property holds for 2nd & 3rd
+ * best.
+ */
+static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
+{
+	const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
+	struct rtt_meas *m = tcp_sk(sk)->rtt_min;
+	struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+	u32 elapsed;
+
+	/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
+	if (unlikely(rttm.rtt <= m[0].rtt))
+		m[0] = m[1] = m[2] = rttm;
+	else if (rttm.rtt <= m[1].rtt)
+		m[1] = m[2] = rttm;
+	else if (rttm.rtt <= m[2].rtt)
+		m[2] = rttm;
+
+	elapsed = now - m[0].ts;
+	if (unlikely(elapsed > wlen)) {
+		/* Passed entire window without a new min so make 2nd choice
+		 * the new min & 3rd choice the new 2nd. So forth and so on.
+		 */
+		m[0] = m[1];
+		m[1] = m[2];
+		m[2] = rttm;
+		if (now - m[0].ts > wlen) {
+			m[0] = m[1];
+			m[1] = rttm;
+			if (now - m[0].ts > wlen)
+				m[0] = rttm;
+		}
+	} else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
+		/* Passed a quarter of the window without a new min so
+		 * take 2nd choice from the 2nd quarter of the window.
+		 */
+		m[2] = m[1] = rttm;
+	} else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
+		/* Passed half the window without a new min so take the 3rd
+		 * choice from the last half of the window.
+		 */
+		m[2] = rttm;
+	}
+}
+
 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
-				      long seq_rtt_us, long sack_rtt_us)
+				      long seq_rtt_us, long sack_rtt_us,
+				      long ca_rtt_us)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2925,9 +2944,6 @@
 	 * Karn's algorithm forbids taking RTT if some retransmitted data
 	 * is acked (RFC6298).
 	 */
-	if (flag & FLAG_RETRANS_DATA_ACKED)
-		seq_rtt_us = -1L;
-
 	if (seq_rtt_us < 0)
 		seq_rtt_us = sack_rtt_us;
 
@@ -2939,11 +2955,16 @@
 	 */
 	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 	    flag & FLAG_ACKED)
-		seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-
+		seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
+							  tp->rx_opt.rcv_tsecr);
 	if (seq_rtt_us < 0)
 		return false;
 
+	/* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
+	 * always taken together with ACK, SACK, or TS-opts. Any negative
+	 * values will be skipped with the seq_rtt_us < 0 check above.
+	 */
+	tcp_update_rtt_min(sk, ca_rtt_us);
 	tcp_rtt_estimator(sk, seq_rtt_us);
 	tcp_set_rto(sk);
 
@@ -2964,7 +2985,7 @@
 		rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
 	}
 
-	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L);
+	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
 }
 
 
@@ -3131,6 +3152,8 @@
 
 		if (sacked & TCPCB_SACKED_ACKED)
 			tp->sacked_out -= acked_pcount;
+		else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
+			tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
 		if (sacked & TCPCB_LOST)
 			tp->lost_out -= acked_pcount;
 
@@ -3169,7 +3192,7 @@
 		flag |= FLAG_SACK_RENEGING;
 
 	skb_mstamp_get(&now);
-	if (likely(first_ackt.v64)) {
+	if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
 		seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
 		ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
 	}
@@ -3178,7 +3201,8 @@
 		ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
 	}
 
-	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
+	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
+					ca_rtt_us);
 
 	if (flag & FLAG_ACKED) {
 		tcp_rearm_rto(sk);
@@ -5472,7 +5496,7 @@
 }
 
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
-					 const struct tcphdr *th, unsigned int len)
+					 const struct tcphdr *th)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -5698,11 +5722,11 @@
  *	address independent.
  */
 
-int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-			  const struct tcphdr *th, unsigned int len)
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct tcphdr *th = tcp_hdr(skb);
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
@@ -5749,7 +5773,7 @@
 		goto discard;
 
 	case TCP_SYN_SENT:
-		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
+		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
 
@@ -6022,7 +6046,7 @@
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 
-	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
+	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
 	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
@@ -6042,9 +6066,11 @@
 }
 
 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
-				      struct sock *sk_listener)
+				      struct sock *sk_listener,
+				      bool attach_listener)
 {
-	struct request_sock *req = reqsk_alloc(ops, sk_listener);
+	struct request_sock *req = reqsk_alloc(ops, sk_listener,
+					       attach_listener);
 
 	if (req) {
 		struct inet_request_sock *ireq = inet_rsk(req);
@@ -6064,13 +6090,13 @@
 /*
  * Return true if a syncookie should be sent
  */
-static bool tcp_syn_flood_action(struct sock *sk,
+static bool tcp_syn_flood_action(const struct sock *sk,
 				 const struct sk_buff *skb,
 				 const char *proto)
 {
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	const char *msg = "Dropping request";
 	bool want_cookie = false;
-	struct listen_sock *lopt;
 
 #ifdef CONFIG_SYN_COOKIES
 	if (sysctl_tcp_syncookies) {
@@ -6081,12 +6107,12 @@
 #endif
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
-	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
-	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
-		lopt->synflood_warned = 1;
+	if (!queue->synflood_warned &&
+	    sysctl_tcp_syncookies != 2 &&
+	    xchg(&queue->synflood_warned, 1) == 0)
 		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
 			proto, ntohs(tcp_hdr(skb)->dest), msg);
-	}
+
 	return want_cookie;
 }
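
One detail worth calling out in the rewritten tcp_syn_flood_action() above: with the listen_opt lock gone, nothing serializes the warning, so xchg() doubles as a lock-free test-and-set — only the CPU that flips synflood_warned from 0 to 1 prints. A userspace analogue of the pattern using C11 atomics in place of the kernel's xchg():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int synflood_warned;

static void warn_once(int port)
{
	/* only the caller that swaps 0 -> 1 prints; everyone else sees 1 */
	if (atomic_exchange(&synflood_warned, 1) == 0)
		printf("Possible SYN flooding on port %d.\n", port);
}

int main(void)
{
	warn_once(80);
	warn_once(80);	/* silent: the flag is already set */
	return 0;
}
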
 
@@ -6120,8 +6146,6 @@
 	struct request_sock *req;
 	bool want_cookie = false;
 	struct flowi fl;
-	int err;
-
 
 	/* TW buckets are converted to open requests without
 	 * limitations, they conserve resources and peer is
@@ -6145,7 +6169,7 @@
 		goto drop;
 	}
 
-	req = inet_reqsk_alloc(rsk_ops, sk);
+	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
 	if (!req)
 		goto drop;
 
@@ -6230,21 +6254,28 @@
 	tcp_rsk(req)->snt_isn = isn;
 	tcp_rsk(req)->txhash = net_tx_rndhash();
 	tcp_openreq_init_rwin(req, sk, dst);
-	if (!want_cookie)
+	if (!want_cookie) {
+		tcp_reqsk_record_syn(sk, req, skb);
 		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
-	err = af_ops->send_synack(fastopen_sk ?: sk, dst, &fl, req,
-				  skb_get_queue_mapping(skb), &foc);
+	}
 	if (fastopen_sk) {
+		af_ops->send_synack(fastopen_sk, dst, &fl, req,
+				    &foc, false);
+		/* Add the child socket directly into the accept queue */
+		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+		sk->sk_data_ready(sk);
+		bh_unlock_sock(fastopen_sk);
 		sock_put(fastopen_sk);
 	} else {
-		if (err || want_cookie)
-			goto drop_and_free;
-
 		tcp_rsk(req)->tfo_listener = false;
-		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+		if (!want_cookie)
+			inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+		af_ops->send_synack(sk, dst, &fl, req,
+				    &foc, !want_cookie);
+		if (want_cookie)
+			goto drop_and_free;
 	}
-	tcp_reqsk_record_syn(sk, req, skb);
-
+	reqsk_put(req);
 	return 0;
 
 drop_and_release:
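
The windowed min filter introduced above is easiest to follow on a concrete trace. Below is a userspace copy of the tcp_update_rtt_min() logic with made-up samples and a window of 100 time units, showing the old minimum (300) aging out of the window and the 2nd-best choice (400) taking over; this is an illustration only, not kernel code:

#include <stdio.h>
#include <stdint.h>

struct rtt_meas { uint32_t rtt, ts; };

static void update_rtt_min(struct rtt_meas m[3], uint32_t rtt, uint32_t now,
			   uint32_t wlen)
{
	struct rtt_meas rttm = { rtt ? rtt : 1, now };
	uint32_t elapsed;

	if (rttm.rtt <= m[0].rtt)
		m[0] = m[1] = m[2] = rttm;	/* new overall min: restart */
	else if (rttm.rtt <= m[1].rtt)
		m[1] = m[2] = rttm;
	else if (rttm.rtt <= m[2].rtt)
		m[2] = rttm;

	elapsed = now - m[0].ts;
	if (elapsed > wlen) {
		/* min aged out of the window: promote 2nd and 3rd choices */
		m[0] = m[1];
		m[1] = m[2];
		m[2] = rttm;
		if (now - m[0].ts > wlen) {
			m[0] = m[1];
			m[1] = rttm;
			if (now - m[0].ts > wlen)
				m[0] = rttm;
		}
	} else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
		m[2] = m[1] = rttm;	/* refresh 2nd choice */
	} else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
		m[2] = rttm;		/* refresh 3rd choice */
	}
}

int main(void)
{
	struct rtt_meas m[3] = { { ~0U, 0 }, { ~0U, 0 }, { ~0U, 0 } };
	uint32_t samples[][2] = {	/* { rtt, time } */
		{ 500, 0 }, { 300, 10 }, { 400, 50 }, { 600, 130 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_rtt_min(m, samples[i][0], samples[i][1], 100);
		/* prints min = 500, 300, 300, then 400 once 300 ages out */
		printf("t=%u min=%u\n", (unsigned)samples[i][1],
		       (unsigned)m[0].rtt);
	}
	return 0;
}
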
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a23ba7d..1c2648b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -324,7 +324,6 @@
 
 	if (seq != tcp_rsk(req)->snt_isn) {
 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-		reqsk_put(req);
 	} else {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
@@ -332,9 +331,10 @@
 		 * created socket, and POSIX does not want network
 		 * errors returned from accept().
 		 */
-		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
+		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
 	}
+	reqsk_put(req);
 }
 EXPORT_SYMBOL(tcp_req_err);
 
@@ -576,7 +576,7 @@
  *	Exception: precedence violation. We do not implement it in any case.
  */
 
-static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct {
@@ -795,7 +795,7 @@
 	inet_twsk_put(tw);
 }
 
-static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -803,7 +803,7 @@
 	 */
 	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 			tcp_time_stamp,
 			req->ts_recent,
 			0,
@@ -821,8 +821,8 @@
 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
-			      u16 queue_mapping,
-			      struct tcp_fastopen_cookie *foc)
+			      struct tcp_fastopen_cookie *foc,
+			      bool attach_req)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -833,12 +833,11 @@
 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
 		return -1;
 
-	skb = tcp_make_synack(sk, dst, req, foc);
+	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
 					    ireq->ir_rmt_addr,
 					    ireq->opt);
@@ -1112,10 +1111,13 @@
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
+#endif
+
 /* Called with rcu_read_lock() */
-static bool tcp_v4_inbound_md5_hash(struct sock *sk,
+static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
+#ifdef CONFIG_TCP_MD5SIG
 	/*
 	 * This gets called for each TCP segment that arrives
 	 * so we want to be efficient.
@@ -1165,8 +1167,9 @@
 		return true;
 	}
 	return false;
-}
 #endif
+	return false;
+}
 
 static void tcp_v4_init_req(struct request_sock *req,
 			    const struct sock *sk_listener,
@@ -1180,7 +1183,8 @@
 	ireq->opt = tcp_v4_save_options(skb);
 }
 
-static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
@@ -1219,7 +1223,6 @@
 	.route_req	=	tcp_v4_route_req,
 	.init_seq	=	tcp_v4_init_sequence,
 	.send_synack	=	tcp_v4_send_synack,
-	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -1242,9 +1245,11 @@
  * The three way handshake has completed - we got a valid synack -
  * now create the new socket.
  */
-struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req,
-				  struct dst_entry *dst)
+				  struct dst_entry *dst,
+				  struct request_sock *req_unhash,
+				  bool *own_req)
 {
 	struct inet_request_sock *ireq;
 	struct inet_sock *newinet;
@@ -1320,7 +1325,7 @@
 
 	if (__inet_inherit_port(sk, newsk) < 0)
 		goto put_and_exit;
-	__inet_hash_nolisten(newsk, NULL);
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
 	return newsk;
 
@@ -1338,34 +1343,11 @@
 }
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
 
-static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
-	const struct tcphdr *th = tcp_hdr(skb);
-	const struct iphdr *iph = ip_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
-	if (req) {
-		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk || nsk == sk)
-			reqsk_put(req);
-		return nsk;
-	}
-
-	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
-			th->source, iph->daddr, th->dest, inet_iif(skb));
-
-	if (nsk) {
-		if (nsk->sk_state != TCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
 #ifdef CONFIG_SYN_COOKIES
+	const struct tcphdr *th = tcp_hdr(skb);
+
 	if (!th->syn)
 		sk = cookie_v4_check(sk, skb);
 #endif
@@ -1373,7 +1355,7 @@
 }
 
 /* The socket must have its spinlock held when we get
- * here.
+ * here, unless it is a TCP_LISTEN socket.
  *
  * We have a potential double-lock case here, so even when
  * doing backlog processing we use the BH locking scheme.
@@ -1404,13 +1386,13 @@
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
+		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
+
 		if (!nsk)
 			goto discard;
-
 		if (nsk != sk) {
 			sock_rps_save_rxhash(nsk, skb);
-			sk_mark_napi_id(sk, skb);
+			sk_mark_napi_id(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb)) {
 				rsk = nsk;
 				goto reset;
@@ -1420,7 +1402,7 @@
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
+	if (tcp_rcv_state_process(sk, skb)) {
 		rsk = sk;
 		goto reset;
 	}
@@ -1590,6 +1572,7 @@
 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
 	TCP_SKB_CB(skb)->sacked	 = 0;
 
+lookup:
 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
 	if (!sk)
 		goto no_tcp_socket;
@@ -1598,6 +1581,33 @@
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		if (tcp_v4_inbound_md5_hash(sk, skb))
+			goto discard_and_relse;
+		if (likely(sk->sk_state == TCP_LISTEN)) {
+			nsk = tcp_check_req(sk, skb, req, false);
+		} else {
+			inet_csk_reqsk_queue_drop_and_put(sk, req);
+			goto lookup;
+		}
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+		} else if (tcp_child_process(sk, nsk, skb)) {
+			tcp_v4_send_reset(nsk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
@@ -1606,25 +1616,23 @@
 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 		goto discard_and_relse;
 
-#ifdef CONFIG_TCP_MD5SIG
-	/*
-	 * We really want to reject the packet as early as possible
-	 * if:
-	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
-	 *  o There is an MD5 option and we're not expecting one
-	 */
 	if (tcp_v4_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
-#endif
 
 	nf_reset(skb);
 
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_incoming_cpu_update(sk);
 	skb->dev = NULL;
 
+	if (sk->sk_state == TCP_LISTEN) {
+		ret = tcp_v4_do_rcv(sk, skb);
+		goto put_and_return;
+	}
+
+	sk_incoming_cpu_update(sk);
+
 	bh_lock_sock_nested(sk);
 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
@@ -1639,6 +1647,7 @@
 	}
 	bh_unlock_sock(sk);
 
+put_and_return:
 	sock_put(sk);
 
 	return ret;
@@ -1833,35 +1842,7 @@
 	++st->num;
 	++st->offset;
 
-	if (st->state == TCP_SEQ_STATE_OPENREQ) {
-		struct request_sock *req = cur;
-
-		icsk = inet_csk(st->syn_wait_sk);
-		req = req->dl_next;
-		while (1) {
-			while (req) {
-				if (req->rsk_ops->family == st->family) {
-					cur = req;
-					goto out;
-				}
-				req = req->dl_next;
-			}
-			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
-				break;
-get_req:
-			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
-		}
-		sk	  = sk_nulls_next(st->syn_wait_sk);
-		st->state = TCP_SEQ_STATE_LISTENING;
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-	} else {
-		icsk = inet_csk(sk);
-		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		if (reqsk_queue_len(&icsk->icsk_accept_queue))
-			goto start_req;
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		sk = sk_nulls_next(sk);
-	}
+	sk = sk_nulls_next(sk);
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
 		if (!net_eq(sock_net(sk), net))
@@ -1871,16 +1852,6 @@
 			goto out;
 		}
 		icsk = inet_csk(sk);
-		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
-start_req:
-			st->uid		= sock_i_uid(sk);
-			st->syn_wait_sk = sk;
-			st->state	= TCP_SEQ_STATE_OPENREQ;
-			st->sbucket	= 0;
-			goto get_req;
-		}
-		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 	}
 	spin_unlock_bh(&ilb->lock);
 	st->offset = 0;
@@ -2012,7 +1983,6 @@
 	void *rc = NULL;
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
 	case TCP_SEQ_STATE_LISTENING:
 		if (st->bucket >= INET_LHTABLE_SIZE)
 			break;
@@ -2071,7 +2041,6 @@
 	}
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
 	case TCP_SEQ_STATE_LISTENING:
 		rc = listening_get_next(seq, v);
 		if (!rc) {
@@ -2096,11 +2065,6 @@
 	struct tcp_iter_state *st = seq->private;
 
 	switch (st->state) {
-	case TCP_SEQ_STATE_OPENREQ:
-		if (v) {
-			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
-		}
 	case TCP_SEQ_STATE_LISTENING:
 		if (v != SEQ_START_TOKEN)
 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
@@ -2154,7 +2118,7 @@
 EXPORT_SYMBOL(tcp_proc_unregister);
 
 static void get_openreq4(const struct request_sock *req,
-			 struct seq_file *f, int i, kuid_t uid)
+			 struct seq_file *f, int i)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	long delta = req->rsk_timer.expires - jiffies;
@@ -2171,7 +2135,8 @@
 		1,    /* timers active (only the expire timer) */
 		jiffies_delta_to_clock_t(delta),
 		req->num_timeout,
-		from_kuid_munged(seq_user_ns(f), uid),
+		from_kuid_munged(seq_user_ns(f),
+				 sock_i_uid(req->rsk_listener)),
 		0,  /* non standard timer */
 		0, /* open_requests have no inode */
 		0,
@@ -2185,7 +2150,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
@@ -2272,18 +2237,12 @@
 	}
 	st = seq->private;
 
-	switch (st->state) {
-	case TCP_SEQ_STATE_LISTENING:
-	case TCP_SEQ_STATE_ESTABLISHED:
-		if (sk->sk_state == TCP_TIME_WAIT)
-			get_timewait4_sock(v, seq, st->num);
-		else
-			get_tcp4_sock(v, seq, st->num);
-		break;
-	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq4(v, seq, st->num, st->uid);
-		break;
-	}
+	if (sk->sk_state == TCP_TIME_WAIT)
+		get_timewait4_sock(v, seq, st->num);
+	else if (sk->sk_state == TCP_NEW_SYN_RECV)
+		get_openreq4(v, seq, st->num);
+	else
+		get_tcp4_sock(v, seq, st->num);
 out:
 	seq_pad(seq, '\n');
 	return 0;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e4fe62b6..3575dd1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -381,18 +381,18 @@
 
 	window_clamp = READ_ONCE(tp->window_clamp);
 	/* Set this up on the first call only */
-	req->window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
 
 	/* limit the window selection if the user enforce a smaller rx buffer */
 	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
-	    (req->window_clamp > full_space || req->window_clamp == 0))
-		req->window_clamp = full_space;
+	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
+		req->rsk_window_clamp = full_space;
 
 	/* tcp_full_space because it is guaranteed to be the first packet */
 	tcp_select_initial_window(full_space,
 		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
-		&req->rcv_wnd,
-		&req->window_clamp,
+		&req->rsk_rcv_wnd,
+		&req->rsk_window_clamp,
 		ireq->wscale_ok,
 		&rcv_wscale,
 		dst_metric(dst, RTAX_INITRWND));
@@ -441,7 +441,9 @@
  * Actually, we could avoid lots of memory writes here. tp of listening
  * socket contains all necessary default parameters.
  */
-struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
+struct sock *tcp_create_openreq_child(const struct sock *sk,
+				      struct request_sock *req,
+				      struct sk_buff *skb)
 {
 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
 
@@ -468,6 +470,7 @@
 
 		newtp->srtt_us = 0;
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+		newtp->rtt_min[0].rtt = ~0U;
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
 		newtp->packets_out = 0;
@@ -510,9 +513,9 @@
 			if (sysctl_tcp_fack)
 				tcp_enable_fack(newtp);
 		}
-		newtp->window_clamp = req->window_clamp;
-		newtp->rcv_ssthresh = req->rcv_wnd;
-		newtp->rcv_wnd = req->rcv_wnd;
+		newtp->window_clamp = req->rsk_window_clamp;
+		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
+		newtp->rcv_wnd = req->rsk_rcv_wnd;
 		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
 		if (newtp->rx_opt.wscale_ok) {
 			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
@@ -545,6 +548,8 @@
 		tcp_ecn_openreq_child(newtp, req);
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
+		newtp->rack.mstamp.v64 = 0;
+		newtp->rack.advanced = 0;
 
 		newtp->saved_syn = req->saved_syn;
 		req->saved_syn = NULL;
@@ -575,8 +580,7 @@
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
 	bool paws_reject = false;
-
-	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));
+	bool own_req;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
@@ -707,7 +711,7 @@
 	/* RFC793: "first check sequence number". */
 
 	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
+					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
 		/* Out of window: send ACK and drop. */
 		if (!(flg & TCP_FLAG_RST))
 			req->rsk_ops->send_ack(sk, skb, req);
@@ -764,17 +768,14 @@
 	 * ESTABLISHED STATE. If it will be dropped after
 	 * socket is created, wait for troubles.
 	 */
-	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
+	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
+							 req, &own_req);
 	if (!child)
 		goto listen_overflow;
 
+	sock_rps_save_rxhash(child, skb);
 	tcp_synack_rtt_meas(child, req);
-	inet_csk_reqsk_queue_drop(sk, req);
-	inet_csk_reqsk_queue_add(sk, req, child);
-	/* Warning: caller must not call reqsk_put(req);
-	 * child stole last reference on it.
-	 */
-	return child;
+	return inet_csk_complete_hashdance(sk, child, req, own_req);
 
 listen_overflow:
 	if (!sysctl_tcp_abort_on_overflow) {
@@ -821,8 +822,7 @@
 	int state = child->sk_state;
 
 	if (!sock_owned_by_user(child)) {
-		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
-					    skb->len);
+		ret = tcp_rcv_state_process(child, skb);
 		/* Wakeup parent, send SIGIO */
 		if (state == TCP_SYN_RECV && child->sk_state != state)
 			parent->sk_data_ready(parent);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 09bb082..f4f9793 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2655,8 +2655,6 @@
 			net_dbg_ratelimited("retrans_out leaked\n");
 		}
 #endif
-		if (!tp->retrans_out)
-			tp->lost_retrans_low = tp->snd_nxt;
 		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
 		tp->retrans_out += tcp_skb_pcount(skb);
 
@@ -2664,10 +2662,6 @@
 		if (!tp->retrans_stamp)
 			tp->retrans_stamp = tcp_skb_timestamp(skb);
 
-		/* snd_nxt is stored to detect loss of retransmitted segment,
-		 * see tcp_input.c tcp_sacktag_write_queue().
-		 */
-		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
 	} else if (err != -EBUSY) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
@@ -2947,7 +2941,8 @@
  */
 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct tcp_fastopen_cookie *foc)
+				struct tcp_fastopen_cookie *foc,
+				bool attach_req)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -2959,11 +2954,7 @@
 	u16 user_mss;
 	int mss;
 
-	/* sk is a const pointer, because we want to express multiple cpus
-	 * might call us concurrently.
-	 * sock_wmalloc() will change sk->sk_wmem_alloc in an atomic way.
-	 */
-	skb = sock_wmalloc((struct sock *)sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
+	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2971,6 +2962,17 @@
 	/* Reserve space for headers. */
 	skb_reserve(skb, MAX_TCP_HEADER);
 
+	if (attach_req) {
+		skb->destructor = sock_edemux;
+		sock_hold(req_to_sk(req));
+		skb->sk = req_to_sk(req);
+	} else {
+		/* sk is a const pointer, because we want to express that
+		 * multiple cpus might call us concurrently.
+		 * sk->sk_wmem_alloc is changed atomically, so we can safely
+		 * promote the pointer to rw.
+		 */
+		skb_set_owner_w(skb, (struct sock *)sk);
+	}
 	skb_dst_set(skb, dst);
 
 	mss = dst_metric_advmss(dst);
@@ -3015,7 +3017,7 @@
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
-	th->window = htons(min(req->rcv_wnd, 65535U));
+	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
@@ -3408,7 +3410,7 @@
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
 	skb_mstamp_get(&skb->skb_mstamp);
-	NET_INC_STATS_BH(sock_net(sk), mib);
+	NET_INC_STATS(sock_net(sk), mib);
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3510,7 +3512,7 @@
 	int res;
 
 	tcp_rsk(req)->txhash = net_tx_rndhash();
-	res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
+	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, true);
 	if (!res) {
 		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
new file mode 100644
index 0000000..5353085
--- /dev/null
+++ b/net/ipv4/tcp_recovery.c
@@ -0,0 +1,109 @@
+#include <linux/tcp.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+
+/* Marks a packet lost if some packet sent later has been (s)acked.
+ * The underlying idea is similar to the traditional dupthresh and FACK
+ * but they look at different metrics:
+ *
+ * dupthresh: 3 OOO packets delivered (packet count)
+ * FACK: sequence delta to highest sacked sequence (sequence space)
+ * RACK: sent time delta to the latest delivered packet (time domain)
+ *
+ * The advantage of RACK is that it applies to both original and retransmitted
+ * packets and therefore is robust against tail losses. Another advantage
+ * is being more resilient to reordering by simply allowing some
+ * "settling delay", instead of tweaking the dupthresh.
+ *
+ * The current version is only used after recovery starts but can be
+ * easily extended to detect the first loss.
+ */
+int tcp_rack_mark_lost(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	u32 reo_wnd, prior_retrans = tp->retrans_out;
+
+	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+		return 0;
+
+	/* Reset the advanced flag to avoid unnecessary queue scanning */
+	tp->rack.advanced = 0;
+
+	/* To be more reordering resilient, allow min_rtt/4 settling delay
+	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
+	 * RTT because reordering is often a path property and less related
+	 * to queuing or delayed ACKs.
+	 *
+	 * TODO: measure and adapt to the observed reordering delay, and
+	 * use a timer to retransmit like the delayed early retransmit.
+	 */
+	reo_wnd = 1000;
+	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
+
+	tcp_for_write_queue(skb, sk) {
+		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+		if (skb == tcp_send_head(sk))
+			break;
+
+		/* Skip ones already (s)acked */
+		if (!after(scb->end_seq, tp->snd_una) ||
+		    scb->sacked & TCPCB_SACKED_ACKED)
+			continue;
+
+		if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+
+			if (skb_mstamp_us_delta(&tp->rack.mstamp,
+						&skb->skb_mstamp) <= reo_wnd)
+				continue;
+
+			/* skb is lost if a packet sent later is sacked */
+			tcp_skb_mark_lost_uncond_verify(tp, skb);
+			if (scb->sacked & TCPCB_SACKED_RETRANS) {
+				scb->sacked &= ~TCPCB_SACKED_RETRANS;
+				tp->retrans_out -= tcp_skb_pcount(skb);
+				NET_INC_STATS_BH(sock_net(sk),
+						 LINUX_MIB_TCPLOSTRETRANSMIT);
+			}
+		} else if (!(scb->sacked & TCPCB_RETRANS)) {
+			/* Original data are sent sequentially, so stop early
+			 * because the rest are all sent after rack_sent
+			 */
+			break;
+		}
+	}
+	return prior_retrans - tp->retrans_out;
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets */
+void tcp_rack_advance(struct tcp_sock *tp,
+		      const struct skb_mstamp *xmit_time, u8 sacked)
+{
+	if (tp->rack.mstamp.v64 &&
+	    !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+		return;
+
+	if (sacked & TCPCB_RETRANS) {
+		struct skb_mstamp now;
+
+		/* If the sacked packet was retransmitted, it's ambiguous
+		 * whether the retransmission or the original (or the prior
+		 * retransmission) was sacked.
+		 *
+		 * If the original is lost, there is no ambiguity. Otherwise
+		 * we assume the original can be delayed up to aRTT + min_rtt.
+		 * The aRTT term is bounded by the fast recovery or timeout,
+		 * so it's at least one RTT (i.e., retransmission is at least
+		 * an RTT later).
+		 */
+		skb_mstamp_get(&now);
+		if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+			return;
+	}
+
+	tp->rack.mstamp = *xmit_time;
+	tp->rack.advanced = 1;
+}
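
To make the time-domain rule concrete, the RACK check can be restated as a
self-contained predicate (an illustrative sketch with invented names, not the
kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	/* A packet is lost once some packet sent later has been delivered
	 * and the gap between their send times exceeds the reordering
	 * window max(min_rtt/4, 1ms).
	 */
	static bool rack_lost(uint64_t pkt_xmit_us, uint64_t rack_mstamp_us,
			      uint64_t min_rtt_us)
	{
		uint64_t reo_wnd = min_rtt_us / 4 > 1000 ? min_rtt_us / 4
							 : 1000;

		return rack_mstamp_us > pkt_xmit_us &&
		       rack_mstamp_us - pkt_xmit_us > reo_wnd;
	}
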
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 7149ebc..c9c716a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -83,7 +83,7 @@
 }
 
 /* Calculate maximal number of retries on an orphaned socket. */
-static int tcp_orphan_retries(struct sock *sk, int alive)
+static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
 	int retries = sysctl_tcp_orphan_retries; /* May be zero. */
 
@@ -184,7 +184,7 @@
 
 		retry_until = sysctl_tcp_retries2;
 		if (sock_flag(sk, SOCK_DEAD)) {
-			const int alive = icsk->icsk_rto < TCP_RTO_MAX;
+			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
 			retry_until = tcp_orphan_retries(sk, alive);
 			do_reset = alive ||
@@ -298,7 +298,7 @@
 
 	max_probes = sysctl_tcp_retries2;
 	if (sock_flag(sk, SOCK_DEAD)) {
-		const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
+		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
 		max_probes = tcp_orphan_retries(sk, alive);
 		if (!alive && icsk->icsk_backoff >= max_probes)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f7d1d5e..24ec14f9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -375,7 +375,8 @@
 			return -1;
 		score += 4;
 	}
-
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
 	return score;
 }
 
@@ -419,6 +420,9 @@
 		score += 4;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
@@ -1017,30 +1021,14 @@
 
 		fl4 = &fl4_stack;
 
-		/* unconnected socket. If output device is enslaved to a VRF
-		 * device lookup source address from VRF table. This mimics
-		 * behavior of ip_route_connect{_init}.
-		 */
-		if (netif_index_is_vrf(net, ipc.oif)) {
-			flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
-					   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-					   (flow_flags | FLOWI_FLAG_VRFSRC |
-					    FLOWI_FLAG_SKIP_NH_OIF),
-					   faddr, saddr, dport,
-					   inet->inet_sport);
-
-			rt = ip_route_output_flow(net, fl4, sk);
-			if (!IS_ERR(rt)) {
-				saddr = fl4->saddr;
-				ip_rt_put(rt);
-			}
-		}
-
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 				   flow_flags,
 				   faddr, saddr, dport, inet->inet_sport);
 
+		if (!saddr && ipc.oif)
+			l3mdev_get_saddr(net, ipc.oif, fl4);
+
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 		rt = ip_route_output_flow(net, fl4, sk);
 		if (IS_ERR(rt)) {
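
Both UDP socket-scoring hunks add the same cache-locality tie-break: prefer
the socket whose sk_incoming_cpu matches the CPU handling the packet. The
real match criteria in these UDP scoring functions are worth 4 points each,
so the +1 can only decide ties between otherwise equivalent sockets. In
miniature (hypothetical helper, not a kernel API):

	static int apply_cpu_tiebreak(int score, int sk_incoming_cpu,
				      int cur_cpu)
	{
		if (sk_incoming_cpu == cur_cpu)
			score++;	/* < 4, never beats a real match */
		return score;
	}
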
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index cd6be73..7ee6518 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -30,6 +30,8 @@
 
 	mtu = dst_mtu(skb_dst(skb));
 	if (skb->len > mtu) {
+		skb->protocol = htons(ETH_P_IP);
+
 		if (skb->sk)
 			xfrm_local_error(skb, mtu);
 		else
@@ -87,17 +89,15 @@
 #ifdef CONFIG_NETFILTER
 	if (!x) {
 		IPCB(skb)->flags |= IPSKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 
 	return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
-int xfrm4_output(struct sock *sk, struct sk_buff *skb)
+int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
-
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 			    net, sk, skb, NULL, skb_dst(skb)->dev,
 			    __xfrm4_output,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 269b137..1e0c3c8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,7 +15,7 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
@@ -111,10 +111,8 @@
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb)) {
-		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
-			: skb_dst(skb)->dev->ifindex;
-	}
+	if (skb_dst(skb))
+		oif = l3mdev_fib_oif(skb_dst(skb)->dev);
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c8380f1..d72fa90 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -81,6 +81,7 @@
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/l3mdev.h>
 #include <linux/if_tunnel.h>
 #include <linux/rtnetlink.h>
 #include <linux/netconf.h>
@@ -2146,7 +2147,7 @@
 		      unsigned long expires, u32 flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = RT6_TABLE_PREFIX,
+		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2179,8 +2180,9 @@
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
+	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
 
-	table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
 		return NULL;
 
@@ -2211,7 +2213,7 @@
 static void addrconf_add_mroute(struct net_device *dev)
 {
 	struct fib6_config cfg = {
-		.fc_table = RT6_TABLE_LOCAL,
+		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_dst_len = 8,
@@ -3029,6 +3031,10 @@
 {
 	struct in6_addr addr;
 
+	/* no link local addresses on L3 master devices */
+	if (netif_is_l3_master(idev->dev))
+		return;
+
 	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 
 	if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) {
@@ -3119,6 +3125,8 @@
 	}
 
 	addrconf_addr_gen(idev, true);
+	if (dev->flags & IFF_POINTOPOINT)
+		addrconf_add_mroute(dev);
 }
 #endif
 
@@ -3139,6 +3147,32 @@
 		}
 		break;
 
+	case NETDEV_CHANGEMTU:
+		/* if MTU is under IPV6_MIN_MTU, stop IPv6 on this interface. */
+		if (dev->mtu < IPV6_MIN_MTU) {
+			addrconf_ifdown(dev, 1);
+			break;
+		}
+
+		if (idev) {
+			rt6_mtu_change(dev, dev->mtu);
+			idev->cnf.mtu6 = dev->mtu;
+			break;
+		}
+
+		/* allocate new idev */
+		idev = ipv6_add_dev(dev);
+		if (IS_ERR(idev))
+			break;
+
+		/* device is still not ready */
+		if (!(idev->if_flags & IF_READY))
+			break;
+
+		run_pending = 1;
+
+		/* fall through */
+
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
 		if (dev->flags & IFF_SLAVE)
@@ -3162,7 +3196,7 @@
 				idev->if_flags |= IF_READY;
 				run_pending = 1;
 			}
-		} else {
+		} else if (event == NETDEV_CHANGE) {
 			if (!addrconf_qdisc_ok(dev)) {
 				/* device is still not ready. */
 				break;
@@ -3227,24 +3261,6 @@
 		}
 		break;
 
-	case NETDEV_CHANGEMTU:
-		if (idev && dev->mtu >= IPV6_MIN_MTU) {
-			rt6_mtu_change(dev, dev->mtu);
-			idev->cnf.mtu6 = dev->mtu;
-			break;
-		}
-
-		if (!idev && dev->mtu >= IPV6_MIN_MTU) {
-			idev = ipv6_add_dev(dev);
-			if (!IS_ERR(idev))
-				break;
-		}
-
-		/*
-		 * if MTU under IPV6_MIN_MTU.
-		 * Stop IPv6 on this interface.
-		 */
-
 	case NETDEV_DOWN:
 	case NETDEV_UNREGISTER:
 		/*
@@ -4780,7 +4796,8 @@
 	return -EMSGSIZE;
 }
 
-static size_t inet6_get_link_af_size(const struct net_device *dev)
+static size_t inet6_get_link_af_size(const struct net_device *dev,
+				     u32 ext_filter_mask)
 {
 	if (!__in6_dev_get(dev))
 		return 0;
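
A recurring pattern in the addrconf hunks is "l3mdev_fib_table(dev) ? :
RT6_TABLE_PREFIX". This is the GNU ?: extension with the middle operand
omitted: if the device is enslaved to an L3 master device (e.g. a VRF), that
device's FIB table is used, otherwise the default table applies. Spelled out:

	u32 tb_id = l3mdev_fib_table(dev);	/* 0 unless an l3mdev slave */
	if (!tb_id)
		tb_id = RT6_TABLE_PREFIX;	/* or RT6_TABLE_LOCAL, etc. */
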
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 9f777ec..ed33abf 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,6 +32,7 @@
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
+	struct rt6_info *rt;
 	struct fib_lookup_arg arg = {
 		.lookup_ptr = lookup,
 		.flags = FIB_LOOKUP_NOREF,
@@ -40,11 +41,21 @@
 	fib_rules_lookup(net->ipv6.fib6_rules_ops,
 			 flowi6_to_flowi(fl6), flags, &arg);
 
-	if (arg.result)
-		return arg.result;
+	rt = arg.result;
 
-	dst_hold(&net->ipv6.ip6_null_entry->dst);
-	return &net->ipv6.ip6_null_entry->dst;
+	if (!rt) {
+		dst_hold(&net->ipv6.ip6_null_entry->dst);
+		return &net->ipv6.ip6_null_entry->dst;
+	}
+
+	if (rt->rt6i_flags & RTF_REJECT &&
+	    rt->dst.error == -EAGAIN) {
+		ip6_rt_put(rt);
+		rt = net->ipv6.ip6_null_entry;
+		dst_hold(&rt->dst);
+	}
+
+	return &rt->dst;
 }
 
 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
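
The new branch treats a reject route carrying -EAGAIN as "no match in this
table" rather than a real verdict: the table's reference is dropped and the
canonical null entry is returned, so callers always see a consistent
not-found result. The same normalization is added to the non-rules lookup in
ip6_fib.c below. In outline:

	if (rt->rt6i_flags & RTF_REJECT && rt->dst.error == -EAGAIN) {
		ip6_rt_put(rt);			/* drop the table's route */
		rt = net->ipv6.ip6_null_entry;	/* canonical "no route" */
		dst_hold(&rt->dst);
	}
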
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 6c2b213..36c5a98 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -68,6 +68,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/dsfield.h>
+#include <net/l3mdev.h>
 
 #include <asm/uaccess.h>
 
@@ -452,7 +453,8 @@
 	 *	and anycast addresses will be checked later.
 	 */
 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
-		net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n");
+		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
+				    &hdr->saddr, &hdr->daddr);
 		return;
 	}
 
@@ -460,7 +462,8 @@
 	 *	Never answer to a ICMP packet.
 	 */
 	if (is_ineligible(skb)) {
-		net_dbg_ratelimited("icmp6_send: no reply to icmp error\n");
+		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
+				    &hdr->saddr, &hdr->daddr);
 		return;
 	}
 
@@ -496,6 +499,9 @@
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
+	if (!fl6.flowi6_oif)
+		fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
+
 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
 	if (IS_ERR(dst))
 		goto out;
@@ -509,7 +515,8 @@
 	len = skb->len - msg.offset;
 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
 	if (len < 0) {
-		net_dbg_ratelimited("icmp: len problem\n");
+		net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
+				    &hdr->saddr, &hdr->daddr);
 		goto out_dst_release;
 	}
 
@@ -575,7 +582,7 @@
 	fl6.daddr = ipv6_hdr(skb)->saddr;
 	if (saddr)
 		fl6.saddr = *saddr;
-	fl6.flowi6_oif = skb->dev->ifindex;
+	fl6.flowi6_oif = l3mdev_fib_oif(skb->dev);
 	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
 	fl6.flowi6_mark = mark;
 	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -781,7 +788,8 @@
 		if (type & ICMPV6_INFOMSG_MASK)
 			break;
 
-		net_dbg_ratelimited("icmpv6: msg of unknown type\n");
+		net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n",
+				    saddr, daddr);
 
 		/*
 		 * error of unknown type.
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
index 678d2df..1a6852e 100644
--- a/net/ipv6/ila.c
+++ b/net/ipv6/ila.c
@@ -91,7 +91,7 @@
 	*(__be64 *)&ip6h->daddr = p->locator;
 }
 
-static int ila_output(struct sock *sk, struct sk_buff *skb)
+static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 
@@ -100,7 +100,7 @@
 
 	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
 
-	return dst->lwtstate->orig_output(sk, skb);
+	return dst->lwtstate->orig_output(net, sk, skb);
 
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 91b7d33..5d1c7ce 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -67,15 +67,16 @@
 
 struct dst_entry *inet6_csk_route_req(const struct sock *sk,
 				      struct flowi6 *fl6,
-				      const struct request_sock *req)
+				      const struct request_sock *req,
+				      u8 proto)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
-	struct ipv6_pinfo *np = inet6_sk(sk);
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *final_p, final;
 	struct dst_entry *dst;
 
 	memset(fl6, 0, sizeof(*fl6));
-	fl6->flowi6_proto = IPPROTO_TCP;
+	fl6->flowi6_proto = proto;
 	fl6->daddr = ireq->ir_v6_rmt_addr;
 	final_p = fl6_update_dst(fl6, np->opt, &final);
 	fl6->saddr = ireq->ir_v6_loc_addr;
@@ -91,73 +92,7 @@
 
 	return dst;
 }
-
-/*
- * request_sock (formerly open request) hash tables.
- */
-static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-			   const u32 rnd, const u32 synq_hsize)
-{
-	u32 c;
-
-	c = jhash_3words((__force u32)raddr->s6_addr32[0],
-			 (__force u32)raddr->s6_addr32[1],
-			 (__force u32)raddr->s6_addr32[2],
-			 rnd);
-
-	c = jhash_2words((__force u32)raddr->s6_addr32[3],
-			 (__force u32)rport,
-			 c);
-
-	return c & (synq_hsize - 1);
-}
-
-struct request_sock *inet6_csk_search_req(struct sock *sk,
-					  const __be16 rport,
-					  const struct in6_addr *raddr,
-					  const struct in6_addr *laddr,
-					  const int iif)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	struct request_sock *req;
-	u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
-				   lopt->nr_table_entries);
-
-	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
-	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
-		const struct inet_request_sock *ireq = inet_rsk(req);
-
-		if (ireq->ir_rmt_port == rport &&
-		    req->rsk_ops->family == AF_INET6 &&
-		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
-		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
-		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
-			atomic_inc(&req->rsk_refcnt);
-			WARN_ON(req->sk != NULL);
-			break;
-		}
-	}
-	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
-
-	return req;
-}
-EXPORT_SYMBOL_GPL(inet6_csk_search_req);
-
-void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
-				    struct request_sock *req,
-				    const unsigned long timeout)
-{
-	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
-				      inet_rsk(req)->ir_rmt_port,
-				      lopt->hash_rnd, lopt->nr_table_entries);
-
-	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
-	inet_csk_reqsk_queue_added(sk, timeout);
-}
-EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
+EXPORT_SYMBOL(inet6_csk_route_req);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 {
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 6ac8dad..21ace5a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -114,6 +114,8 @@
 				return -1;
 			score++;
 		}
+		if (sk->sk_incoming_cpu == raw_smp_processor_id())
+			score++;
 	}
 	return score;
 }
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7d2e002..0c7e276 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -264,6 +264,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(fib6_get_table);
 
 static void __net_init fib6_tables_init(struct net *net)
 {
@@ -285,7 +286,17 @@
 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 				   int flags, pol_lookup_t lookup)
 {
-	return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+	struct rt6_info *rt;
+
+	rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
+	if (rt->rt6i_flags & RTF_REJECT &&
+	    rt->dst.error == -EAGAIN) {
+		ip6_rt_put(rt);
+		rt = net->ipv6.ip6_null_entry;
+		dst_hold(&rt->dst);
+	}
+
+	return &rt->dst;
 }
 
 static void __net_init fib6_tables_init(struct net *net)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 08b6204..eeca943 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -264,6 +264,9 @@
 	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
 	int err = -ENOSYS;
 
+	if (skb->encapsulation)
+		skb_set_inner_network_header(skb, nhoff);
+
 	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
 
 	rcu_read_lock();
@@ -280,6 +283,13 @@
 	return err;
 }
 
+static int sit_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	skb->encapsulation = 1;
+	skb_shinfo(skb)->gso_type |= SKB_GSO_SIT;
+	return ipv6_gro_complete(skb, nhoff);
+}
+
 static struct packet_offload ipv6_packet_offload __read_mostly = {
 	.type = cpu_to_be16(ETH_P_IPV6),
 	.callbacks = {
@@ -292,6 +302,8 @@
 static const struct net_offload sit_offload = {
 	.callbacks = {
 		.gso_segment	= ipv6_gso_segment,
+		.gro_receive    = ipv6_gro_receive,
+		.gro_complete   = sit_gro_complete,
 	},
 };
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a598fe2..c265068 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -28,6 +28,7 @@
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
+#include <linux/overflow-arith.h>
 #include <linux/string.h>
 #include <linux/socket.h>
 #include <linux/net.h>
@@ -55,12 +56,12 @@
 #include <net/xfrm.h>
 #include <net/checksum.h>
 #include <linux/mroute6.h>
+#include <net/l3mdev.h>
 
-static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
+static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
-	struct net *net = dev_net(dev);
 	struct neighbour *neigh;
 	struct in6_addr *nexthop;
 	int ret;
@@ -126,16 +127,15 @@
 	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 	    dst_allfrag(skb_dst(skb)) ||
 	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
-		return ip6_fragment(sk, skb, ip6_finish_output2);
+		return ip6_fragment(net, sk, skb, ip6_finish_output2);
 	else
-		return ip6_finish_output2(sk, skb);
+		return ip6_finish_output2(net, sk, skb);
 }
 
-int ip6_output(struct sock *sk, struct sk_buff *skb)
+int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct net_device *dev = skb_dst(skb)->dev;
 	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
-	struct net *net = dev_net(dev);
 
 	if (unlikely(idev->cnf.disable_ipv6)) {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
@@ -234,7 +234,7 @@
 		 */
 		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 			       net, (struct sock *)sk, skb, NULL, dst->dev,
-			       dst_output_okfn);
+			       dst_output);
 	}
 
 	skb->dev = dst->dev;
@@ -334,7 +334,7 @@
 				     struct sk_buff *skb)
 {
 	skb_sender_cpu_clear(skb);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -390,6 +390,9 @@
 	if (skb->pkt_type != PACKET_HOST)
 		goto drop;
 
+	if (unlikely(skb->sk))
+		goto drop;
+
 	if (skb_warn_if_lro(skb))
 		goto drop;
 
@@ -554,8 +557,8 @@
 	skb_copy_secmark(to, from);
 }
 
-int ip6_fragment(struct sock *sk, struct sk_buff *skb,
-		 int (*output)(struct sock *, struct sk_buff *))
+int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+		 int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
 	struct sk_buff *frag;
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
@@ -568,7 +571,6 @@
 	__be32 frag_id;
 	int ptr, offset = 0, err = 0;
 	u8 *prevhdr, nexthdr = 0;
-	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 	nexthdr = *prevhdr;
@@ -595,7 +597,10 @@
 		if (np->frag_size)
 			mtu = np->frag_size;
 	}
-	mtu -= hlen + sizeof(struct frag_hdr);
+
+	if (overflow_usub(mtu, hlen + sizeof(struct frag_hdr), &mtu) ||
+	    mtu <= 7)
+		goto fail_toobig;
 
 	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
 				    &ipv6_hdr(skb)->saddr);
@@ -688,7 +693,7 @@
 				ip6_copy_metadata(frag, skb);
 			}
 
-			err = output(sk, skb);
+			err = output(net, sk, skb);
 			if (!err)
 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 					      IPSTATS_MIB_FRAGCREATES);
@@ -816,7 +821,7 @@
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		err = output(sk, frag);
+		err = output(net, sk, frag);
 		if (err)
 			goto fail;
 
@@ -888,7 +893,8 @@
 #ifdef CONFIG_IPV6_SUBTREES
 	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 #endif
-	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
+	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
+	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
 		dst_release(dst);
 		dst = NULL;
 	}
@@ -1040,7 +1046,7 @@
 	if (final_dst)
 		fl6->daddr = *final_dst;
 	if (!fl6->flowi6_oif)
-		fl6->flowi6_oif = dst->dev->ifindex;
+		fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
 
 	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
@@ -1694,7 +1700,7 @@
 	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 	int err;
 
-	err = ip6_local_out(skb);
+	err = ip6_local_out(net, skb->sk, skb);
 	if (err) {
 		if (err > 0)
 			err = net_xmit_errno(err);
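
The overflow_usub() change above replaces a bare "mtu -= hlen +
sizeof(struct frag_hdr)", which silently wraps when the effective MTU is
smaller than the headers and yields a huge bogus fragment size. A portable
sketch of checked unsigned subtraction (illustrative only; the kernel helper
comes from linux/overflow-arith.h, included earlier in this diff):

	#include <stdbool.h>

	static bool usub_overflows(unsigned int a, unsigned int b,
				   unsigned int *res)
	{
		*res = a - b;	/* unsigned wrap is well defined in C */
		return a < b;	/* true when the subtraction wrapped */
	}
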
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index f96f1c1..0a8610b 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -482,7 +482,7 @@
 		return -EMSGSIZE;
 	}
 
-	err = dst_output(skb->sk, skb);
+	err = dst_output(t->net, skb->sk, skb);
 	if (net_xmit_eval(err) == 0) {
 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 5e5d16e..ad19136 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1991,7 +1991,7 @@
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
 	IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTOCTETS, skb->len);
-	return dst_output(sk, skb);
+	return dst_output(net, sk, skb);
 }
 
 /*
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a8bf57c..124338a 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1646,7 +1646,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
-		      dst_output_okfn);
+		      dst_output);
 out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
@@ -2010,7 +2010,7 @@
 	skb_dst_set(skb, dst);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, skb->dev,
-		      dst_output_okfn);
+		      dst_output);
 out:
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index b9779d4..60c79a0 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -118,7 +118,7 @@
 
 struct mip6_report_rate_limiter {
 	spinlock_t lock;
-	struct timeval stamp;
+	ktime_t stamp;
 	int iif;
 	struct in6_addr src;
 	struct in6_addr dst;
@@ -184,20 +184,18 @@
 	return 0;
 }
 
-static inline int mip6_report_rl_allow(struct timeval *stamp,
+static inline int mip6_report_rl_allow(ktime_t stamp,
 				       const struct in6_addr *dst,
 				       const struct in6_addr *src, int iif)
 {
 	int allow = 0;
 
 	spin_lock_bh(&mip6_report_rl.lock);
-	if (mip6_report_rl.stamp.tv_sec != stamp->tv_sec ||
-	    mip6_report_rl.stamp.tv_usec != stamp->tv_usec ||
+	if (!ktime_equal(mip6_report_rl.stamp, stamp) ||
 	    mip6_report_rl.iif != iif ||
 	    !ipv6_addr_equal(&mip6_report_rl.src, src) ||
 	    !ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
-		mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
-		mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
+		mip6_report_rl.stamp = stamp;
 		mip6_report_rl.iif = iif;
 		mip6_report_rl.src = *src;
 		mip6_report_rl.dst = *dst;
@@ -216,7 +214,7 @@
 	struct ipv6_destopt_hao *hao = NULL;
 	struct xfrm_selector sel;
 	int offset;
-	struct timeval stamp;
+	ktime_t stamp;
 	int err = 0;
 
 	if (unlikely(fl6->flowi6_proto == IPPROTO_MH &&
@@ -230,9 +228,9 @@
 					(skb_network_header(skb) + offset);
 	}
 
-	skb_get_timestamp(skb, &stamp);
+	stamp = skb_get_ktime(skb);
 
-	if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr,
+	if (!mip6_report_rl_allow(stamp, &ipv6_hdr(skb)->daddr,
 				  hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
 				  opt->iif))
 		goto out;
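
Replacing struct timeval with ktime_t here raises the timestamp resolution
(nanoseconds in a single 64-bit word) and collapses the rate limiter's
equality test from two field comparisons to one scalar compare. A minimal
sketch of the difference (invented type names):

	#include <stdbool.h>
	#include <stdint.h>

	struct tv_sketch { long tv_sec; long tv_usec; };
	typedef int64_t ktime_sketch;	/* nanoseconds, one word */

	static bool tv_equal(struct tv_sketch a, struct tv_sketch b)
	{
		return a.tv_sec == b.tv_sec && a.tv_usec == b.tv_usec;
	}

	static bool kt_equal(ktime_sketch a, ktime_sketch b)
	{
		return a == b;	/* what ktime_equal() boils down to */
	}
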
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7089c30..3e0f855 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -67,6 +67,7 @@
 #include <net/flow.h>
 #include <net/ip6_checksum.h>
 #include <net/inet_common.h>
+#include <net/l3mdev.h>
 #include <linux/proc_fs.h>
 
 #include <linux/netfilter.h>
@@ -147,6 +148,7 @@
 	.gc_thresh2 =	 512,
 	.gc_thresh3 =	1024,
 };
+EXPORT_SYMBOL_GPL(nd_tbl);
 
 static void ndisc_fill_addr_option(struct sk_buff *skb, int type, void *data)
 {
@@ -441,8 +443,11 @@
 
 	if (!dst) {
 		struct flowi6 fl6;
+		int oif = l3mdev_fib_oif(skb->dev);
 
-		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex);
+		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
+		if (oif != skb->dev->ifindex)
+			fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
 		dst = icmp6_dst_alloc(skb->dev, &fl6);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
@@ -465,7 +470,7 @@
 
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, dst->dev,
-		      dst_output_okfn);
+		      dst_output);
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
@@ -766,7 +771,7 @@
 
 	ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
 	if (ifp) {
-
+have_ifp:
 		if (ifp->flags & (IFA_F_TENTATIVE|IFA_F_OPTIMISTIC)) {
 			if (dad) {
 				/*
@@ -792,6 +797,18 @@
 	} else {
 		struct net *net = dev_net(dev);
 
+		/* perhaps an address on the master device */
+		if (netif_is_l3_slave(dev)) {
+			struct net_device *mdev;
+
+			mdev = netdev_master_upper_dev_get_rcu(dev);
+			if (mdev) {
+				ifp = ipv6_get_ifaddr(net, &msg->target, mdev, 1);
+				if (ifp)
+					goto have_ifp;
+			}
+		}
+
 		idev = in6_dev_get(dev);
 		if (!idev) {
 			/* XXX: count this drop? */
@@ -1483,6 +1500,7 @@
 	struct flowi6 fl6;
 	int rd_len;
 	u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
+	int oif = l3mdev_fib_oif(dev);
 	bool ret;
 
 	if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
@@ -1499,7 +1517,10 @@
 	}
 
 	icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
-			 &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
+			 &saddr_buf, &ipv6_hdr(skb)->saddr, oif);
+
+	if (oif != skb->dev->ifindex)
+		fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
 
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (dst->error) {
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index b4de08a..d11c468 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -18,9 +18,8 @@
 #include <net/ip6_checksum.h>
 #include <net/netfilter/nf_queue.h>
 
-int ip6_route_me_harder(struct sk_buff *skb)
+int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	unsigned int hh_len;
 	struct dst_entry *dst;
@@ -93,7 +92,7 @@
 	}
 }
 
-static int nf_ip6_reroute(struct sk_buff *skb,
+static int nf_ip6_reroute(struct net *net, struct sk_buff *skb,
 			  const struct nf_queue_entry *entry)
 {
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
@@ -103,7 +102,7 @@
 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 		    skb->mark != rt_info->mark)
-			return ip6_route_me_harder(skb);
+			return ip6_route_me_harder(net, skb);
 	}
 	return 0;
 }
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 96833e4..f6a024e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -58,6 +58,7 @@
 
 config NF_DUP_IPV6
 	tristate "Netfilter IPv6 packet duplication to alternate destination"
+	depends on !NF_CONNTRACK || NF_CONNTRACK
 	help
 	  This option enables the nf_dup_ipv6 core, which duplicates an IPv6
 	  packet to be rerouted to another destination.
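
The added "depends on !NF_CONNTRACK || NF_CONNTRACK" looks like a tautology
but is the standard Kconfig idiom for an optional dependency: in tristate
logic the expression evaluates to m when NF_CONNTRACK=m, capping NF_DUP_IPV6
at m as well, so built-in code can never end up calling into a modular
conntrack.
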
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 80e3bd7..99425cf 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -117,7 +117,7 @@
 	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
 		dprintf("VIA in mismatch (%s vs %s).%s\n",
 			indev, ip6info->iniface,
-			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
+			ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
 		return false;
 	}
 
@@ -126,14 +126,14 @@
 	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
 		dprintf("VIA out mismatch (%s vs %s).%s\n",
 			outdev, ip6info->outiface,
-			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
+			ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
 		return false;
 	}
 
 /* ... might want to do something with class and flowlabel here ... */
 
 	/* look for the desired protocol header */
-	if((ip6info->flags & IP6T_F_PROTO)) {
+	if (ip6info->flags & IP6T_F_PROTO) {
 		int protohdr;
 		unsigned short _frag_off;
 
@@ -151,9 +151,9 @@
 				ip6info->proto);
 
 		if (ip6info->proto == protohdr) {
-			if(ip6info->invflags & IP6T_INV_PROTO) {
+			if (ip6info->invflags & IP6T_INV_PROTO)
 				return false;
-			}
+
 			return true;
 		}
 
@@ -443,8 +443,8 @@
 			break;
 	} while (!acpar.hotdrop);
 
- 	xt_write_recseq_end(addend);
- 	local_bh_enable();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
@@ -561,7 +561,7 @@
 				pos = newpos;
 			}
 		}
-		next:
+next:
 		duprintf("Finished chain %u\n", hook);
 	}
 	return 1;
@@ -816,7 +816,7 @@
    newinfo) */
 static int
 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
-                const struct ip6t_replace *repl)
+		const struct ip6t_replace *repl)
 {
 	struct ip6t_entry *iter;
 	unsigned int i;
@@ -1090,7 +1090,7 @@
 #endif
 
 static int get_info(struct net *net, void __user *user,
-                    const int *len, int compat)
+		    const int *len, int compat)
 {
 	char name[XT_TABLE_MAXNAMELEN];
 	struct xt_table *t;
@@ -1152,7 +1152,7 @@
 
 static int
 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
-            const int *len)
+	    const int *len)
 {
 	int ret;
 	struct ip6t_get_entries get;
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index c235660..3deed58 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -76,7 +76,7 @@
 		nf_conntrack_get(nfct);
 	}
 
-	ip6_local_out(nskb);
+	ip6_local_out(net, nskb->sk, nskb);
 	return;
 
 free_nskb:
@@ -244,7 +244,7 @@
 	synproxy_build_options(nth, opts);
 
 	synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-	                  niph, nth, tcp_hdr_size);
+			  niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -458,14 +458,12 @@
 static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = {
 	{
 		.hook		= ipv6_synproxy_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM - 1,
 	},
 	{
 		.hook		= ipv6_synproxy_hook,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM - 1,
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 8745b59..abe278b 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -65,7 +65,7 @@
 	     skb->mark != mark ||
 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
-		err = ip6_route_me_harder(skb);
+		err = ip6_route_me_harder(state->net, skb);
 		if (err < 0)
 			ret = NF_DROP_ERR(err);
 	}
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index abea175..de2a10a 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -70,7 +70,6 @@
 	/* Before packet filtering, change destination */
 	{
 		.hook		= ip6table_nat_in,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_DST,
@@ -78,7 +77,6 @@
 	/* After packet filtering, change source */
 	{
 		.hook		= ip6table_nat_out,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP6_PRI_NAT_SRC,
@@ -86,7 +84,6 @@
 	/* Before packet filtering, change destination */
 	{
 		.hook		= ip6table_nat_local_fn,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_NAT_DST,
@@ -94,7 +91,6 @@
 	/* After packet filtering, change source */
 	{
 		.hook		= ip6table_nat_fn,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_NAT_SRC,
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index dd83ad4..1aa5848 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -187,42 +187,36 @@
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
 	{
 		.hook		= ipv6_conntrack_in,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP6_PRI_CONNTRACK,
 	},
 	{
 		.hook		= ipv6_conntrack_local,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_CONNTRACK,
 	},
 	{
 		.hook		= ipv6_helper,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP6_PRI_CONNTRACK_HELPER,
 	},
 	{
 		.hook		= ipv6_confirm,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_POST_ROUTING,
 		.priority	= NF_IP6_PRI_LAST,
 	},
 	{
 		.hook		= ipv6_helper,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_CONNTRACK_HELPER,
 	},
 	{
 		.hook		= ipv6_confirm,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_LAST-1,
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index d3b7974..660bc10 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -57,12 +57,12 @@
 	[ICMPV6_ECHO_REQUEST - 128]	= ICMPV6_ECHO_REPLY + 1,
 	[ICMPV6_ECHO_REPLY - 128]	= ICMPV6_ECHO_REQUEST + 1,
 	[ICMPV6_NI_QUERY - 128]		= ICMPV6_NI_REPLY + 1,
-	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_QUERY +1
+	[ICMPV6_NI_REPLY - 128]		= ICMPV6_NI_QUERY + 1
 };
 
 static const u_int8_t noct_valid_new[] = {
 	[ICMPV6_MGM_QUERY - 130] = 1,
-	[ICMPV6_MGM_REPORT -130] = 1,
+	[ICMPV6_MGM_REPORT - 130] = 1,
 	[ICMPV6_MGM_REDUCTION - 130] = 1,
 	[NDISC_ROUTER_SOLICITATION - 130] = 1,
 	[NDISC_ROUTER_ADVERTISEMENT - 130] = 1,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 701cd2b..056f5d4 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -59,7 +59,7 @@
 	struct sk_buff		*orig;
 };
 
-#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb*)((skb)->cb))
+#define NFCT_FRAG6_CB(skb)	((struct nf_ct_frag6_skb_cb *)((skb)->cb))
 
 static struct inet_frags nf_frags;
 
@@ -445,7 +445,7 @@
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
 
-	for (fp=head->next; fp; fp = fp->next) {
+	for (fp = head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
 		head->len += fp->len;
 		if (head->ip_summed != fp->ip_summed)
@@ -563,12 +563,10 @@
 	return 0;
 }
 
-struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
 {
 	struct sk_buff *clone;
 	struct net_device *dev = skb->dev;
-	struct net *net = skb_dst(skb) ? dev_net(skb_dst(skb)->dev)
-				       : dev_net(skb->dev);
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
 	struct ipv6hdr *hdr;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index a99baf6..4fdbed5e 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -63,7 +63,8 @@
 		return NF_ACCEPT;
 #endif
 
-	reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(state->hook, skb));
+	reasm = nf_ct_frag6_gather(state->net, skb,
+				   nf_ct6_defrag_user(state->hook, skb));
 	/* queued */
 	if (reasm == NULL)
 		return NF_STOLEN;
@@ -84,14 +85,12 @@
 static struct nf_hook_ops ipv6_defrag_ops[] = {
 	{
 		.hook		= ipv6_defrag,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_PRE_ROUTING,
 		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
 	},
 	{
 		.hook		= ipv6_defrag,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_CONNTRACK_DEFRAG,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
index ee0d9a5..6989c70 100644
--- a/net/ipv6/netfilter/nf_dup_ipv6.c
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -68,7 +68,7 @@
 	}
 	if (nf_dup_ipv6_route(net, skb, gw, oif)) {
 		__this_cpu_write(nf_skb_duplicated, true);
-		ip6_local_out(skb);
+		ip6_local_out(net, skb->sk, skb);
 		__this_cpu_write(nf_skb_duplicated, false);
 	} else {
 		kfree_skb(skb);
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index 18e835f..238e70c 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -437,7 +437,7 @@
 
 		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
 				      &ct->tuplehash[!dir].tuple.src.u3)) {
-			err = ip6_route_me_harder(skb);
+			err = ip6_route_me_harder(state->net, skb);
 			if (err < 0)
 				ret = NF_DROP_ERR(err);
 		}
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 94b4c6d..e0f922b 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -26,7 +26,7 @@
 	int tcphoff;
 
 	proto = oip6h->nexthdr;
-	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
+	tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data),
 				   &proto, &frag_off);
 
 	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
@@ -206,7 +206,7 @@
 		dev_queue_xmit(nskb);
 	} else
 #endif
-		ip6_local_out(nskb);
+		ip6_local_out(net, nskb->sk, nskb);
 }
 EXPORT_SYMBOL_GPL(nf_send_reset6);
 
@@ -224,7 +224,7 @@
 		return true;
 
 	proto = ip6h->nexthdr;
-	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
+	thoff = ipv6_skip_exthdr(skb, ((u8 *)(ip6h + 1) - skb->data), &proto, &fo);
 
 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 		return false;
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index d42bbc1..71d995f 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -52,7 +52,7 @@
 	     skb->mark != mark ||
 	     ipv6_hdr(skb)->hop_limit != hop_limit ||
 	     flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
-		return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+		return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP;
 
 	return ret;
 }
@@ -61,11 +61,11 @@
 	.name		= "route",
 	.type		= NFT_CHAIN_T_ROUTE,
 	.family		= NFPROTO_IPV6,
-        .owner		= THIS_MODULE,
+	.owner		= THIS_MODULE,
 	.hook_mask	= (1 << NF_INET_LOCAL_OUT),
 	.hooks		= {
-                [NF_INET_LOCAL_OUT]	= nf_route_table_hook,
-        },
+		[NF_INET_LOCAL_OUT]	= nf_route_table_hook,
+	},
 };
 
 static int __init nft_chain_route_init(void)
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index e77102c..462f2a76b 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -138,9 +138,8 @@
 EXPORT_SYMBOL(ip6_dst_hoplimit);
 #endif
 
-static int __ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
 	int len;
 
 	len = skb->len - sizeof(struct ipv6hdr);
@@ -151,29 +150,18 @@
 
 	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
-		       dst_output_okfn);
-}
-
-int __ip6_local_out(struct sk_buff *skb)
-{
-	return __ip6_local_out_sk(skb->sk, skb);
+		       dst_output);
 }
 EXPORT_SYMBOL_GPL(__ip6_local_out);
 
-int ip6_local_out_sk(struct sock *sk, struct sk_buff *skb)
+int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 
-	err = __ip6_local_out_sk(sk, skb);
+	err = __ip6_local_out(net, sk, skb);
 	if (likely(err == 1))
-		err = dst_output(sk, skb);
+		err = dst_output(net, sk, skb);
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ip6_local_out_sk);
-
-int ip6_local_out(struct sk_buff *skb)
-{
-	return ip6_local_out_sk(skb->sk, skb);
-}
 EXPORT_SYMBOL_GPL(ip6_local_out);
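
The "err == 1" test follows the netfilter calling convention:
__ip6_local_out() now returns the NF_INET_LOCAL_OUT verdict directly, and 1
means "accepted, not stolen or queued", i.e. the caller should continue
transmitting. Annotated:

	err = __ip6_local_out(net, sk, skb);	/* runs LOCAL_OUT hooks */
	if (likely(err == 1))			/* passed through */
		err = dst_output(net, sk, skb);	/* hand to dst->output */
	/* err < 0: dropped with error; err == 0: consumed by a hook */
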
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index fec0151..dc65ec1 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -655,7 +655,7 @@
 
 	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
-		      NULL, rt->dst.dev, dst_output_okfn);
+		      NULL, rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 66a6b2c..2701cb3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -61,6 +61,7 @@
 #include <net/nexthop.h>
 #include <net/lwtunnel.h>
 #include <net/ip_tunnels.h>
+#include <net/l3mdev.h>
 
 #include <asm/uaccess.h>
 
@@ -86,9 +87,9 @@
 static int		 ip6_dst_gc(struct dst_ops *ops);
 
 static int		ip6_pkt_discard(struct sk_buff *skb);
-static int		ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
+static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static int		ip6_pkt_prohibit(struct sk_buff *skb);
-static int		ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
+static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
 static void		ip6_link_failure(struct sk_buff *skb);
 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 					   struct sk_buff *skb, u32 mtu);
@@ -142,6 +143,9 @@
 	struct net_device *loopback_dev = net->loopback_dev;
 	int cpu;
 
+	if (dev == loopback_dev)
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
 		struct rt6_info *rt;
@@ -151,14 +155,12 @@
 			struct inet6_dev *rt_idev = rt->rt6i_idev;
 			struct net_device *rt_dev = rt->dst.dev;
 
-			if (rt_idev && (rt_idev->dev == dev || !dev) &&
-			    rt_idev->dev != loopback_dev) {
+			if (rt_idev->dev == dev) {
 				rt->rt6i_idev = in6_dev_get(loopback_dev);
 				in6_dev_put(rt_idev);
 			}
 
-			if (rt_dev && (rt_dev == dev || !dev) &&
-			    rt_dev != loopback_dev) {
+			if (rt_dev == dev) {
 				rt->dst.dev = loopback_dev;
 				dev_hold(rt->dst.dev);
 				dev_put(rt_dev);
@@ -247,12 +249,6 @@
 {
 }
 
-static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
-					 unsigned long old)
-{
-	return NULL;
-}
-
 static struct dst_ops ip6_dst_blackhole_ops = {
 	.family			=	AF_INET6,
 	.destroy		=	ip6_dst_destroy,
@@ -261,7 +257,7 @@
 	.default_advmss		=	ip6_default_advmss,
 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
 	.redirect		=	ip6_rt_blackhole_redirect,
-	.cow_metrics		=	ip6_rt_blackhole_cow_metrics,
+	.cow_metrics		=	dst_cow_metrics_generic,
 	.neigh_lookup		=	ip6_neigh_lookup,
 };
 
@@ -308,7 +304,7 @@
 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
 		.error		= -EINVAL,
 		.input		= dst_discard,
-		.output		= dst_discard_sk,
+		.output		= dst_discard_out,
 	},
 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
 	.rt6i_protocol  = RTPROT_KERNEL,
@@ -318,6 +314,15 @@
 
 #endif
 
+static void rt6_info_init(struct rt6_info *rt)
+{
+	struct dst_entry *dst = &rt->dst;
+
+	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+	INIT_LIST_HEAD(&rt->rt6i_siblings);
+	INIT_LIST_HEAD(&rt->rt6i_uncached);
+}
+
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
 					struct net_device *dev,
@@ -326,13 +331,9 @@
 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
 					0, DST_OBSOLETE_FORCE_CHK, flags);
 
-	if (rt) {
-		struct dst_entry *dst = &rt->dst;
+	if (rt)
+		rt6_info_init(rt);
 
-		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-		INIT_LIST_HEAD(&rt->rt6i_siblings);
-		INIT_LIST_HEAD(&rt->rt6i_uncached);
-	}
 	return rt;
 }
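
rt6_info_init() consolidates a zeroing trick used by both allocation paths:
struct rt6_info embeds its struct dst_entry as the first member, so
"memset(dst + 1, ...)" clears exactly the rt6-specific tail while leaving
the already-initialized dst_entry intact. The same pattern in miniature:

	#include <string.h>

	struct header { long refcnt; };		/* must be the first member */
	struct object { struct header h; int a, b; };

	static void object_init_tail(struct object *o)
	{
		/* zero everything after the embedded header */
		memset(&o->h + 1, 0, sizeof(*o) - sizeof(o->h));
	}
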
 
@@ -1044,6 +1045,9 @@
 	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
 	saved_fn = fn;
 
+	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
+		oif = 0;
+
 redo_rt6_select:
 	rt = rt6_select(fn, oif, strict);
 	if (rt->rt6i_nsiblings)
@@ -1141,7 +1145,7 @@
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
 	struct ip_tunnel_info *tun_info;
 	struct flowi6 fl6 = {
-		.flowi6_iif = skb->dev->ifindex,
+		.flowi6_iif = l3mdev_fib_oif(skb->dev),
 		.daddr = iph->daddr,
 		.saddr = iph->saddr,
 		.flowlabel = ip6_flowinfo(iph),
@@ -1165,14 +1169,22 @@
 struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
 				    struct flowi6 *fl6)
 {
+	struct dst_entry *dst;
 	int flags = 0;
+	bool any_src;
+
+	dst = l3mdev_rt6_dst_by_oif(net, fl6);
+	if (dst)
+		return dst;
 
 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
-	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+	any_src = ipv6_addr_any(&fl6->saddr);
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
+	    (fl6->flowi6_oif && any_src))
 		flags |= RT6_LOOKUP_F_IFACE;
 
-	if (!ipv6_addr_any(&fl6->saddr))
+	if (!any_src)
 		flags |= RT6_LOOKUP_F_HAS_SADDR;
 	else if (sk)
 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
@@ -1188,24 +1200,20 @@
 
 	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
 	if (rt) {
+		rt6_info_init(rt);
+
 		new = &rt->dst;
-
-		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
-
 		new->__use = 1;
 		new->input = dst_discard;
-		new->output = dst_discard_sk;
+		new->output = dst_discard_out;
 
-		if (dst_metrics_read_only(&ort->dst))
-			new->_metrics = ort->dst._metrics;
-		else
-			dst_copy_metrics(new, &ort->dst);
+		dst_copy_metrics(new, &ort->dst);
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
 
 		rt->rt6i_gateway = ort->rt6i_gateway;
-		rt->rt6i_flags = ort->rt6i_flags;
+		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
 		rt->rt6i_metric = 0;
 
 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1723,21 +1731,21 @@
 	return -EINVAL;
 }
 
-int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
+static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
 {
-	int err;
 	struct net *net = cfg->fc_nlinfo.nl_net;
 	struct rt6_info *rt = NULL;
 	struct net_device *dev = NULL;
 	struct inet6_dev *idev = NULL;
 	struct fib6_table *table;
 	int addr_type;
+	int err = -EINVAL;
 
 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
-		return -EINVAL;
+		goto out;
 #ifndef CONFIG_IPV6_SUBTREES
 	if (cfg->fc_src_len)
-		return -EINVAL;
+		goto out;
 #endif
 	if (cfg->fc_ifindex) {
 		err = -ENODEV;
@@ -1852,7 +1860,7 @@
 		switch (cfg->fc_type) {
 		case RTN_BLACKHOLE:
 			rt->dst.error = -EINVAL;
-			rt->dst.output = dst_discard_sk;
+			rt->dst.output = dst_discard_out;
 			rt->dst.input = dst_discard;
 			break;
 		case RTN_PROHIBIT:
@@ -1957,9 +1965,7 @@
 
 	cfg->fc_nlinfo.nl_net = dev_net(dev);
 
-	*rt_ret = rt;
-
-	return 0;
+	return rt;
 out:
 	if (dev)
 		dev_put(dev);
@@ -1968,20 +1974,21 @@
 	if (rt)
 		dst_free(&rt->dst);
 
-	*rt_ret = NULL;
-
-	return err;
+	return ERR_PTR(err);
 }
 
 int ip6_route_add(struct fib6_config *cfg)
 {
 	struct mx6_config mxc = { .mx = NULL, };
-	struct rt6_info *rt = NULL;
+	struct rt6_info *rt;
 	int err;
 
-	err = ip6_route_info_create(cfg, &rt);
-	if (err)
+	rt = ip6_route_info_create(cfg);
+	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
+		rt = NULL;
 		goto out;
+	}
 
 	err = ip6_convert_metrics(&mxc, cfg);
 	if (err)
@@ -2263,7 +2270,6 @@
 					   unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= RT6_TABLE_INFO,
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= ifindex,
 		.fc_dst_len	= prefixlen,
@@ -2274,6 +2280,7 @@
 		.fc_nlinfo.nl_net = net,
 	};
 
+	cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -2314,7 +2321,7 @@
 				     unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= RT6_TABLE_DFLT,
+		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -2361,7 +2368,8 @@
 {
 	memset(cfg, 0, sizeof(*cfg));
 
-	cfg->fc_table = RT6_TABLE_MAIN;
+	cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
+			 : RT6_TABLE_MAIN;
 	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
 	cfg->fc_metric = rtmsg->rtmsg_metric;
 	cfg->fc_expires = rtmsg->rtmsg_info;
@@ -2445,7 +2453,7 @@
 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
+static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	skb->dev = skb_dst(skb)->dev;
 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
@@ -2456,7 +2464,7 @@
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
 }
 
-static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
+static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	skb->dev = skb_dst(skb)->dev;
 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
@@ -2470,6 +2478,7 @@
 				    const struct in6_addr *addr,
 				    bool anycast)
 {
+	u32 tb_id;
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
 					    DST_NOCOUNT);
@@ -2492,7 +2501,8 @@
 	rt->rt6i_gateway  = *addr;
 	rt->rt6i_dst.addr = *addr;
 	rt->rt6i_dst.plen = 128;
-	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
+	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
+	rt->rt6i_table = fib6_get_table(net, tb_id);
 	rt->dst.flags |= DST_NOCACHE;
 
 	atomic_set(&rt->dst.__refcnt, 1);
@@ -2597,7 +2607,8 @@
 
 	fib6_clean_all(net, fib6_ifdown, &adn);
 	icmp6_clean_all(fib6_ifdown, &adn);
-	rt6_uncached_list_flush_dev(net, dev);
+	if (dev)
+		rt6_uncached_list_flush_dev(net, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -2870,9 +2881,12 @@
 				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 
-		err = ip6_route_info_create(&r_cfg, &rt);
-		if (err)
+		rt = ip6_route_info_create(&r_cfg);
+		if (IS_ERR(rt)) {
+			err = PTR_ERR(rt);
+			rt = NULL;
 			goto cleanup;
+		}
 
 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
 		if (err) {
@@ -3251,6 +3265,11 @@
 	} else {
 		fl6.flowi6_oif = oif;
 
+		if (netif_index_is_l3_master(net, oif)) {
+			fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
+					   FLOWI_FLAG_SKIP_NH_OIF;
+		}
+
 		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
 	}
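
Two flow flags introduced for VRF cooperate in this last hunk:
FLOWI_FLAG_L3MDEV_SRC lets source-address selection consult the L3 master
device, while FLOWI_FLAG_SKIP_NH_OIF (also honored in the ip6_pol_route hunk
above, where it zeroes oif before rt6_select()) tells the FIB lookup not to
require that the nexthop device match flowi6_oif, since the oif names the
l3mdev itself rather than the real egress port.
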
 
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2461b3f..bb8f2fa 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -114,14 +114,11 @@
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
 {
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 
-	tcp_synq_overflow(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
 	return __cookie_v6_init_sequence(iph, th, mssp);
 }
 
@@ -173,7 +170,7 @@
 		goto out;
 
 	ret = NULL;
-	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk);
+	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
 	if (!req)
 		goto out;
 
@@ -238,9 +235,9 @@
 			goto out_free;
 	}
 
-	req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
+	req->rsk_window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
 	tcp_select_initial_window(tcp_full_space(sk), req->mss,
-				  &req->rcv_wnd, &req->window_clamp,
+				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
 				  ireq->wscale_ok, &rcv_wscale,
 				  dst_metric(dst, RTAX_INITRWND));
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 16fb299..714bc5a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -70,8 +70,8 @@
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
 
-static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
-static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
+static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req);
 
 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
@@ -82,7 +82,7 @@
 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #else
-static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
+static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
 						   const struct in6_addr *addr)
 {
 	return NULL;
@@ -437,8 +437,8 @@
 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
 			      struct flowi *fl,
 			      struct request_sock *req,
-			      u16 queue_mapping,
-			      struct tcp_fastopen_cookie *foc)
+			      struct tcp_fastopen_cookie *foc,
+			      bool attach_req)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -447,10 +447,11 @@
 	int err = -ENOMEM;
 
 	/* First, grab a route. */
-	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
+	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
+					       IPPROTO_TCP)) == NULL)
 		goto done;
 
-	skb = tcp_make_synack(sk, dst, req, foc);
+	skb = tcp_make_synack(sk, dst, req, foc, attach_req);
 
 	if (skb) {
 		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -460,7 +461,6 @@
 		if (np->repflow && ireq->pktopts)
 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -621,8 +621,12 @@
 	return 1;
 }
 
-static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+#endif
+
+static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
+				    const struct sk_buff *skb)
 {
+#ifdef CONFIG_TCP_MD5SIG
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -659,9 +663,9 @@
 				     &ip6h->daddr, ntohs(th->dest));
 		return true;
 	}
+#endif
 	return false;
 }
-#endif
 
 static void tcp_v6_init_req(struct request_sock *req,
 			    const struct sock *sk_listener,
@@ -688,13 +692,14 @@
 	}
 }
 
-static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
+					  struct flowi *fl,
 					  const struct request_sock *req,
 					  bool *strict)
 {
 	if (strict)
 		*strict = true;
-	return inet6_csk_route_req(sk, &fl->u.ip6, req);
+	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 }
 
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
@@ -721,10 +726,9 @@
 	.route_req	=	tcp_v6_route_req,
 	.init_seq	=	tcp_v6_init_sequence,
 	.send_synack	=	tcp_v6_send_synack,
-	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
 };
 
-static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
 				 u32 ack, u32 win, u32 tsval, u32 tsecr,
 				 int oif, struct tcp_md5sig_key *key, int rst,
 				 u8 tclass, u32 label)
@@ -823,7 +827,7 @@
 	kfree_skb(buff);
 }
 
-static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	u32 seq = 0, ack_seq = 0;
@@ -894,7 +898,7 @@
 #endif
 }
 
-static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
+static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 			    struct tcp_md5sig_key *key, u8 tclass,
 			    u32 label)
@@ -917,7 +921,7 @@
 	inet_twsk_put(tw);
 }
 
-static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
@@ -925,44 +929,18 @@
 	 */
 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
-			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
 }
 
 
-static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
 {
-	const struct tcphdr *th = tcp_hdr(skb);
-	struct request_sock *req;
-	struct sock *nsk;
-
-	/* Find possible connection requests. */
-	req = inet6_csk_search_req(sk, th->source,
-				   &ipv6_hdr(skb)->saddr,
-				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-	if (req) {
-		nsk = tcp_check_req(sk, skb, req, false);
-		if (!nsk || nsk == sk)
-			reqsk_put(req);
-		return nsk;
-	}
-	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
-					 &ipv6_hdr(skb)->saddr, th->source,
-					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
-					 tcp_v6_iif(skb));
-
-	if (nsk) {
-		if (nsk->sk_state != TCP_TIME_WAIT) {
-			bh_lock_sock(nsk);
-			return nsk;
-		}
-		inet_twsk_put(inet_twsk(nsk));
-		return NULL;
-	}
-
 #ifdef CONFIG_SYN_COOKIES
+	const struct tcphdr *th = tcp_hdr(skb);
+
 	if (!th->syn)
 		sk = cookie_v6_check(sk, skb);
 #endif
@@ -985,12 +963,15 @@
 	return 0; /* don't send reset */
 }
 
-static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
-					 struct dst_entry *dst)
+					 struct dst_entry *dst,
+					 struct request_sock *req_unhash,
+					 bool *own_req)
 {
 	struct inet_request_sock *ireq;
-	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+	struct ipv6_pinfo *newnp;
+	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
 	struct tcp_sock *newtp;
@@ -1005,7 +986,8 @@
 		 *	v6 mapped
 		 */
 
-		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
+		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
+					     req_unhash, own_req);
 
 		if (!newsk)
 			return NULL;
@@ -1058,7 +1040,7 @@
 		goto out_overflow;
 
 	if (!dst) {
-		dst = inet6_csk_route_req(sk, &fl6, req);
+		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
 		if (!dst)
 			goto out;
 	}
@@ -1166,7 +1148,7 @@
 		tcp_done(newsk);
 		goto out;
 	}
-	__inet_hash(newsk, NULL);
+	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 
 	return newsk;
 
@@ -1180,7 +1162,7 @@
 }
 
 /* The socket must have its spinlock held when we get
- * here.
+ * here, unless it is a TCP_LISTEN socket.
  *
  * We have a potential double-lock case here, so even when
  * doing backlog processing we use the BH locking scheme.
@@ -1251,18 +1233,14 @@
 		goto csum_err;
 
 	if (sk->sk_state == TCP_LISTEN) {
-		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
+		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
+
 		if (!nsk)
 			goto discard;
 
-		/*
-		 * Queue it on the new socket if the new socket is active,
-		 * otherwise we just shortcircuit this and continue with
-		 * the new socket..
-		 */
 		if (nsk != sk) {
 			sock_rps_save_rxhash(nsk, skb);
-			sk_mark_napi_id(sk, skb);
+			sk_mark_napi_id(nsk, skb);
 			if (tcp_child_process(sk, nsk, skb))
 				goto reset;
 			if (opt_skb)
@@ -1272,7 +1250,7 @@
 	} else
 		sock_rps_save_rxhash(sk, skb);
 
-	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
+	if (tcp_rcv_state_process(sk, skb))
 		goto reset;
 	if (opt_skb)
 		goto ipv6_pktoptions;
@@ -1386,6 +1364,7 @@
 	th = tcp_hdr(skb);
 	hdr = ipv6_hdr(skb);
 
+lookup:
 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
 				inet6_iif(skb));
 	if (!sk)
@@ -1395,6 +1374,37 @@
 	if (sk->sk_state == TCP_TIME_WAIT)
 		goto do_time_wait;
 
+	if (sk->sk_state == TCP_NEW_SYN_RECV) {
+		struct request_sock *req = inet_reqsk(sk);
+		struct sock *nsk = NULL;
+
+		sk = req->rsk_listener;
+		tcp_v6_fill_cb(skb, hdr, th);
+		if (tcp_v6_inbound_md5_hash(sk, skb)) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (likely(sk->sk_state == TCP_LISTEN)) {
+			nsk = tcp_check_req(sk, skb, req, false);
+		} else {
+			inet_csk_reqsk_queue_drop_and_put(sk, req);
+			goto lookup;
+		}
+		if (!nsk) {
+			reqsk_put(req);
+			goto discard_it;
+		}
+		if (nsk == sk) {
+			sock_hold(sk);
+			reqsk_put(req);
+			tcp_v6_restore_cb(skb);
+		} else if (tcp_child_process(sk, nsk, skb)) {
+			tcp_v6_send_reset(nsk, skb);
+			goto discard_it;
+		} else {
+			return 0;
+		}
+	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
@@ -1405,17 +1415,21 @@
 
 	tcp_v6_fill_cb(skb, hdr, th);
 
-#ifdef CONFIG_TCP_MD5SIG
 	if (tcp_v6_inbound_md5_hash(sk, skb))
 		goto discard_and_relse;
-#endif
 
 	if (sk_filter(sk, skb))
 		goto discard_and_relse;
 
-	sk_incoming_cpu_update(sk);
 	skb->dev = NULL;
 
+	if (sk->sk_state == TCP_LISTEN) {
+		ret = tcp_v6_do_rcv(sk, skb);
+		goto put_and_return;
+	}
+
+	sk_incoming_cpu_update(sk);
+
 	bh_lock_sock_nested(sk);
 	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
 	ret = 0;
@@ -1430,6 +1444,7 @@
 	}
 	bh_unlock_sock(sk);
 
+put_and_return:
 	sock_put(sk);
 	return ret ? -1 : 0;
 
@@ -1630,7 +1645,7 @@
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCPv6 sock list dumping. */
 static void get_openreq6(struct seq_file *seq,
-			 struct request_sock *req, int i, kuid_t uid)
+			 const struct request_sock *req, int i)
 {
 	long ttd = req->rsk_timer.expires - jiffies;
 	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
@@ -1654,7 +1669,8 @@
 		   1,   /* timers active (only the expire timer) */
 		   jiffies_to_clock_t(ttd),
 		   req->num_timeout,
-		   from_kuid_munged(seq_user_ns(seq), uid),
+		   from_kuid_munged(seq_user_ns(seq),
+				    sock_i_uid(req->rsk_listener)),
 		   0,  /* non standard timer */
 		   0, /* open_requests have no inode */
 		   0, req);
@@ -1669,7 +1685,7 @@
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1713,7 +1729,7 @@
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
 		   sp->sk_state == TCP_LISTEN ?
-			(fastopenq ? fastopenq->max_qlen : 0) :
+			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
 }
@@ -1759,18 +1775,12 @@
 	}
 	st = seq->private;
 
-	switch (st->state) {
-	case TCP_SEQ_STATE_LISTENING:
-	case TCP_SEQ_STATE_ESTABLISHED:
-		if (sk->sk_state == TCP_TIME_WAIT)
-			get_timewait6_sock(seq, v, st->num);
-		else
-			get_tcp6_sock(seq, v, st->num);
-		break;
-	case TCP_SEQ_STATE_OPENREQ:
-		get_openreq6(seq, v, st->num, st->uid);
-		break;
-	}
+	if (sk->sk_state == TCP_TIME_WAIT)
+		get_timewait6_sock(seq, v, st->num);
+	else if (sk->sk_state == TCP_NEW_SYN_RECV)
+		get_openreq6(seq, v, st->num);
+	else
+		get_tcp6_sock(seq, v, st->num);
 out:
 	return 0;
 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 0aba654..01bcb49 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -182,10 +182,12 @@
 		score++;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
-#define SCORE2_MAX (1 + 1 + 1)
 static inline int compute_score2(struct sock *sk, struct net *net,
 				 const struct in6_addr *saddr, __be16 sport,
 				 const struct in6_addr *daddr,
@@ -223,6 +225,9 @@
 		score++;
 	}
 
+	if (sk->sk_incoming_cpu == raw_smp_processor_id())
+		score++;
+
 	return score;
 }
 
@@ -251,8 +256,7 @@
 				hash = udp6_ehashfn(net, daddr, hnum,
 						    saddr, sport);
 				matches = 1;
-			} else if (score == SCORE2_MAX)
-				goto exact_match;
+			}
 		} else if (score == badness && reuseport) {
 			matches++;
 			if (reciprocal_scale(hash, matches) == 0)
@@ -269,7 +273,6 @@
 		goto begin;
 
 	if (result) {
-exact_match:
 		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
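The reuseport path above keeps using reciprocal_scale() to pick one socket
out of the equally-scored candidates. A minimal standalone sketch of that
mapping, assuming the usual kernel definition ((u64)val * n) >> 32
(userspace C, illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* Map a 32-bit hash uniformly onto [0, n) without a division. */
	static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
	{
		return (uint32_t)(((uint64_t)val * n) >> 32);
	}

	int main(void)
	{
		uint32_t hash = 0x9e3779b9;	/* example flow hash */
		uint32_t matches = 4;		/* equally-scored sockets */

		printf("selected index: %u\n",
		       reciprocal_scale(hash, matches));
		return 0;
	}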
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 0c3e9ff..4d09ce6 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -79,6 +79,7 @@
 
 	if (!skb->ignore_df && skb->len > mtu) {
 		skb->dev = dst->dev;
+		skb->protocol = htons(ETH_P_IPV6);
 
 		if (xfrm6_local_dontfrag(skb))
 			xfrm6_local_rxpmtu(skb, mtu);
@@ -131,45 +132,55 @@
 	return xfrm_output(sk, skb);
 }
 
+static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+	return x->outer_mode->afinfo->output_finish(sk, skb);
+}
+
 static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct xfrm_state *x = dst->xfrm;
 	int mtu;
+	bool toobig;
 
 #ifdef CONFIG_NETFILTER
 	if (!x) {
 		IP6CB(skb)->flags |= IP6SKB_REROUTED;
-		return dst_output(sk, skb);
+		return dst_output(net, sk, skb);
 	}
 #endif
 
+	if (x->props.mode != XFRM_MODE_TUNNEL)
+		goto skip_frag;
+
 	if (skb->protocol == htons(ETH_P_IPV6))
 		mtu = ip6_skb_dst_mtu(skb);
 	else
 		mtu = dst_mtu(skb_dst(skb));
 
-	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+	toobig = skb->len > mtu && !skb_is_gso(skb);
+
+	if (toobig && xfrm6_local_dontfrag(skb)) {
 		xfrm6_local_rxpmtu(skb, mtu);
 		return -EMSGSIZE;
-	} else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
+	} else if (!skb->ignore_df && toobig && skb->sk) {
 		xfrm_local_error(skb, mtu);
 		return -EMSGSIZE;
 	}
 
-	if (x->props.mode == XFRM_MODE_TUNNEL &&
-	    ((skb->len > mtu && !skb_is_gso(skb)) ||
-		dst_allfrag(skb_dst(skb)))) {
-		return ip6_fragment(sk, skb,
-				    x->outer_mode->afinfo->output_finish);
-	}
+	if (toobig || dst_allfrag(skb_dst(skb)))
+		return ip6_fragment(net, sk, skb,
+				    __xfrm6_output_finish);
+
+skip_frag:
 	return x->outer_mode->afinfo->output_finish(sk, skb);
 }
 
-int xfrm6_output(struct sock *sk, struct sk_buff *skb)
+int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
-
 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 			    net, sk, skb,  NULL, skb_dst(skb)->dev,
 			    __xfrm6_output,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 2fad593..5643423 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,7 +20,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
-#include <net/vrf.h>
+#include <net/l3mdev.h>
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
@@ -37,6 +37,7 @@
 
 	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_oif = oif;
+	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -132,10 +133,8 @@
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb)) {
-		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
-			: skb_dst(skb)->dev->ifindex;
-	}
+	if (skb_dst(skb))
+		oif = l3mdev_fib_oif(skb_dst(skb)->dev);
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
@@ -178,7 +177,8 @@
 			return;
 
 		case IPPROTO_ICMPV6:
-			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+			if (!onlyproto && (nh + offset + 2 < skb->data ||
+			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
 				u8 *icmp;
 
 				nh = skb_network_header(skb);
@@ -192,7 +192,8 @@
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 		case IPPROTO_MH:
 			offset += ipv6_optlen(exthdr);
-			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+			if (!onlyproto && (nh + offset + 3 < skb->data ||
+			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
 				struct ip6_mh *mh;
 
 				nh = skb_network_header(skb);
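The new "nh + offset + N < skb->data" guards avoid handing pskb_may_pull()
a negative length, which would wrap to a huge value in its unsigned
parameter and make the header parse fail even though the data is already
pulled. A standalone illustration of the wrap (userspace C; may_pull() is
a stand-in, not the kernel helper):

	#include <stdio.h>

	static int may_pull(unsigned int len)
	{
		return len <= 1500;	/* pretend the packet is 1500 bytes */
	}

	int main(void)
	{
		char pkt[64];
		char *data = pkt + 32;	/* current parse position */
		char *nh = pkt;		/* network header */
		int offset = 8;
		/* nh + offset + 2 lies before data: already pulled */
		long diff = nh + offset + 2 - data;

		printf("diff=%ld as unsigned=%u may_pull=%d\n",
		       diff, (unsigned int)diff,
		       may_pull((unsigned int)diff));
		return 0;
	}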
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index a26c401..4396459 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -1839,7 +1839,7 @@
 	for (element = hashbin_get_first(iter->hashbin);
 	     element != NULL;
 	     element = hashbin_get_next(iter->hashbin)) {
-		if (!off || *off-- == 0) {
+		if (!off || (*off)-- == 0) {
 			/* NB: hashbin left locked */
 			return element;
 		}
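The one-character irlmp fix matters because the postfix decrement binds
tighter than the dereference: *off-- steps the pointer back and reads its
old target, while (*off)-- decrements the counter the pointer refers to.
A tiny standalone check:

	#include <stdio.h>

	int main(void)
	{
		long vals[2] = { 2, 7 };
		long *off = &vals[1];

		(*off)--;	/* decrements the value: vals[1] becomes 6 */
		(void)*off--;	/* decrements the pointer: off is now &vals[0] */

		printf("vals = {%ld, %ld}, off - vals = %td\n",
		       vals[0], vals[1], off - vals);	/* {2, 6}, 0 */
		return 0;
	}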
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 83a7068..f9c9ecb 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -261,7 +261,7 @@
 
 		err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
-		/* Error is cleare after succecful sending to at least one
+		/* Error is cleared after successful sending to at least one
 		 * registered KM */
 		if ((broadcast_flags & BROADCAST_REGISTERED) && err)
 			err = err2;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index f6b090d..afca2eb 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1319,7 +1319,7 @@
 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
 	sk = l2tp_tunnel_sock_lookup(tunnel);
 	if (!sk)
-		return;
+		goto out;
 
 	sock = sk->sk_socket;
 
@@ -1341,6 +1341,8 @@
 	}
 
 	l2tp_tunnel_sock_put(sk);
+out:
+	l2tp_tunnel_dec_refcount(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+	l2tp_tunnel_inc_refcount(tunnel);
 	l2tp_tunnel_closeall(tunnel);
-	return (false == queue_work(l2tp_wq, &tunnel->del_work));
+	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+		l2tp_tunnel_dec_refcount(tunnel);
+		return 1;
+	}
+	return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
 
diff --git a/net/l3mdev/Kconfig b/net/l3mdev/Kconfig
new file mode 100644
index 0000000..5d47325
--- /dev/null
+++ b/net/l3mdev/Kconfig
@@ -0,0 +1,10 @@
+#
+# Configuration for L3 master device support
+#
+
+config NET_L3_MASTER_DEV
+	bool "L3 Master device support"
+	depends on INET || IPV6
+	---help---
+	  This module provides glue between core networking code and device
+	  drivers to support L3 master devices like VRF.
diff --git a/net/l3mdev/Makefile b/net/l3mdev/Makefile
new file mode 100644
index 0000000..84a53a6
--- /dev/null
+++ b/net/l3mdev/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the L3 device API
+#
+
+obj-$(CONFIG_NET_L3_MASTER_DEV) += l3mdev.o
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
new file mode 100644
index 0000000..8e5ead3
--- /dev/null
+++ b/net/l3mdev/l3mdev.c
@@ -0,0 +1,92 @@
+/*
+ * net/l3mdev/l3mdev.c - L3 master device implementation
+ * Copyright (c) 2015 Cumulus Networks
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+#include <net/l3mdev.h>
+
+/**
+ *	l3mdev_master_ifindex_rcu - get index of L3 master device
+ *	@dev: targeted interface
+ */
+
+int l3mdev_master_ifindex_rcu(struct net_device *dev)
+{
+	int ifindex = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_l3_master(dev)) {
+		ifindex = dev->ifindex;
+	} else if (netif_is_l3_slave(dev)) {
+		struct net_device *master;
+
+		master = netdev_master_upper_dev_get_rcu(dev);
+		if (master)
+			ifindex = master->ifindex;
+	}
+
+	return ifindex;
+}
+EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
+
+/**
+ *	l3mdev_fib_table_rcu - get FIB table id associated with an L3
+ *	                       master interface
+ *	@dev: targeted interface
+ */
+
+u32 l3mdev_fib_table_rcu(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_l3_master(dev)) {
+		if (dev->l3mdev_ops->l3mdev_fib_table)
+			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
+	} else if (netif_is_l3_slave(dev)) {
+		/* Users of netdev_master_upper_dev_get_rcu need non-const,
+		 * but current inet_*type functions take a const
+		 */
+		struct net_device *_dev = (struct net_device *) dev;
+		const struct net_device *master;
+
+		master = netdev_master_upper_dev_get_rcu(_dev);
+		if (master &&
+		    master->l3mdev_ops->l3mdev_fib_table)
+			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
+	}
+
+	return tb_id;
+}
+EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);
+
+u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	u32 tb_id = 0;
+
+	if (!ifindex)
+		return 0;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		tb_id = l3mdev_fib_table_rcu(dev);
+
+	rcu_read_unlock();
+
+	return tb_id;
+}
+EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
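For these helpers to return anything useful, an L3 master driver has to
flag its device and supply l3mdev_ops. A rough sketch of the driver side,
with hypothetical vrf names (the real VRF wiring may differ):

	static u32 vrf_fib_table(const struct net_device *dev)
	{
		/* hypothetical private struct of the VRF driver */
		struct net_vrf *vrf = netdev_priv(dev);

		return vrf->tb_id;	/* table id this VRF routes with */
	}

	static const struct l3mdev_ops vrf_l3mdev_ops = {
		.l3mdev_fib_table = vrf_fib_table,
	};

	static void vrf_setup(struct net_device *dev)
	{
		/* ... usual netdev setup ... */
		dev->priv_flags |= IFF_L3MDEV_MASTER; /* netif_is_l3_master() */
		dev->l3mdev_ops = &vrf_l3mdev_ops;
	}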
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 783e891..f9137a8 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -27,7 +27,6 @@
 	key.o \
 	util.o \
 	wme.o \
-	event.o \
 	chan.o \
 	trace.o mlme.o \
 	tdls.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 5c564a6..10ad4ac 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -79,7 +79,7 @@
 	       (int)reason);
 
 	if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-			     &sta->sta, tid, NULL, 0))
+			     &sta->sta, tid, NULL, 0, false))
 		sdata_info(sta->sdata,
 			   "HW problem - can not stop rx aggregation for %pM tid %d\n",
 			   sta->sta.addr, tid);
@@ -189,6 +189,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *mgmt;
+	bool amsdu = ieee80211_hw_check(&local->hw, SUPPORTS_AMSDU_IN_AMPDU);
 	u16 capab;
 
 	skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
@@ -217,7 +218,8 @@
 	mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
 	mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
 
-	capab = (u16)(policy << 1);	/* bit 1 aggregation policy */
+	capab = (u16)(amsdu << 0);	/* bit 0 A-MSDU support */
+	capab |= (u16)(policy << 1);	/* bit 1 aggregation policy */
 	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
 	capab |= (u16)(buf_size << 6);	/* bit 15:6 max size of aggregation */
 
@@ -321,7 +323,7 @@
 		__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
 
 	ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-			       &sta->sta, tid, &start_seq_num, 0);
+			       &sta->sta, tid, &start_seq_num, 0, false);
 	ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
 	       sta->sta.addr, tid, ret);
 	if (ret) {
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8ba2e7..a758eb84 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -97,7 +97,8 @@
 	mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
 
 	mgmt->u.action.u.addba_req.dialog_token = dialog_token;
-	capab = (u16)(1 << 1);		/* bit 1 aggregation policy */
+	capab = (u16)(1 << 0);		/* bit 0 A-MSDU support */
+	capab |= (u16)(1 << 1);		/* bit 1 aggregation policy */
 	capab |= (u16)(tid << 2); 	/* bit 5:2 TID number */
 	capab |= (u16)(agg_size << 6);	/* bit 15:6 max size of aggregation */
 
@@ -331,7 +332,7 @@
 			return -EALREADY;
 		ret = drv_ampdu_action(local, sta->sdata,
 				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
-				       &sta->sta, tid, NULL, 0);
+				       &sta->sta, tid, NULL, 0, false);
 		WARN_ON_ONCE(ret);
 		return 0;
 	}
@@ -381,7 +382,7 @@
 	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
 	ret = drv_ampdu_action(local, sta->sdata, action,
-			       &sta->sta, tid, NULL, 0);
+			       &sta->sta, tid, NULL, 0, false);
 
 	/* HW shall not deny going back to legacy */
 	if (WARN_ON(ret)) {
@@ -469,7 +470,7 @@
 	start_seq_num = sta->tid_seq[tid] >> 4;
 
 	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-			       &sta->sta, tid, &start_seq_num, 0);
+			       &sta->sta, tid, &start_seq_num, 0, false);
 	if (ret) {
 		ht_dbg(sdata,
 		       "BA request denied - HW unavailable for %pM tid %d\n",
@@ -693,7 +694,8 @@
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
-			 &sta->sta, tid, NULL, tid_tx->buf_size);
+			 &sta->sta, tid, NULL, tid_tx->buf_size,
+			 tid_tx->amsdu);
 
 	/*
 	 * synchronize with TX path, while splicing the TX path
@@ -918,8 +920,10 @@
 	struct tid_ampdu_tx *tid_tx;
 	u16 capab, tid;
 	u8 buf_size;
+	bool amsdu;
 
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
+	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
 
@@ -968,6 +972,7 @@
 		}
 
 		tid_tx->buf_size = buf_size;
+		tid_tx->amsdu = amsdu;
 
 		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
 			ieee80211_agg_tx_operational(local, sta, tid);
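Both the ADDBA request and response now advertise A-MSDU support in bit 0
of the capability field, next to the existing policy, TID and buffer-size
subfields. A standalone sketch of the packing, with mask values that
mirror the IEEE80211_ADDBA_PARAM_* definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define ADDBA_AMSDU_MASK	0x0001	/* bit 0 */
	#define ADDBA_POLICY_MASK	0x0002	/* bit 1 */
	#define ADDBA_TID_MASK		0x003C	/* bits 5:2 */
	#define ADDBA_BUF_SIZE_MASK	0xFFC0	/* bits 15:6 */

	int main(void)
	{
		uint16_t capab = 0;

		capab |= 1 << 0;	/* A-MSDU supported */
		capab |= 1 << 1;	/* immediate block ack policy */
		capab |= 5 << 2;	/* TID 5 */
		capab |= 64 << 6;	/* buffer size 64 */

		printf("amsdu=%u policy=%u tid=%u buf=%u\n",
		       capab & ADDBA_AMSDU_MASK,
		       (capab & ADDBA_POLICY_MASK) >> 1,
		       (capab & ADDBA_TID_MASK) >> 2,
		       (capab & ADDBA_BUF_SIZE_MASK) >> 6);
		return 0;
	}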
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7a77a14..713cdbf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,7 +17,6 @@
 #include <net/cfg80211.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
-#include "cfg.h"
 #include "rate.h"
 #include "mesh.h"
 #include "wme.h"
@@ -469,45 +468,6 @@
 		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
 }
 
-void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
-{
-	rinfo->flags = 0;
-
-	if (sta->last_rx_rate_flag & RX_FLAG_HT) {
-		rinfo->flags |= RATE_INFO_FLAGS_MCS;
-		rinfo->mcs = sta->last_rx_rate_idx;
-	} else if (sta->last_rx_rate_flag & RX_FLAG_VHT) {
-		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
-		rinfo->nss = sta->last_rx_rate_vht_nss;
-		rinfo->mcs = sta->last_rx_rate_idx;
-	} else {
-		struct ieee80211_supported_band *sband;
-		int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
-		u16 brate;
-
-		sband = sta->local->hw.wiphy->bands[
-				ieee80211_get_sdata_band(sta->sdata)];
-		brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
-		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
-	}
-
-	if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
-		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-
-	if (sta->last_rx_rate_flag & RX_FLAG_5MHZ)
-		rinfo->bw = RATE_INFO_BW_5;
-	else if (sta->last_rx_rate_flag & RX_FLAG_10MHZ)
-		rinfo->bw = RATE_INFO_BW_10;
-	else if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
-		rinfo->bw = RATE_INFO_BW_40;
-	else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
-		rinfo->bw = RATE_INFO_BW_80;
-	else if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
-		rinfo->bw = RATE_INFO_BW_160;
-	else
-		rinfo->bw = RATE_INFO_BW_20;
-}
-
 static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
 				  int idx, u8 *mac, struct station_info *sinfo)
 {
@@ -981,7 +941,7 @@
 		 * well. Some drivers require rate control initialized
 		 * before drv_sta_state() is called.
 		 */
-		if (test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+		if (!test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 			rate_control_rate_init(sta);
 
 		ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
@@ -1120,8 +1080,11 @@
 	    local->hw.queues >= IEEE80211_NUM_ACS)
 		sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
 
-	/* auth flags will be set later for TDLS stations */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+	/* auth flags will be set later for TDLS,
+	 * and for unassociated stations that move to associated */
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    !((mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) &&
+	      (set & BIT(NL80211_STA_FLAG_ASSOCIATED)))) {
 		ret = sta_apply_auth_flags(local, sta, mask, set);
 		if (ret)
 			return ret;
@@ -1135,6 +1098,7 @@
 	}
 
 	if (mask & BIT(NL80211_STA_FLAG_MFP)) {
+		sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
 		if (set & BIT(NL80211_STA_FLAG_MFP))
 			set_sta_flag(sta, WLAN_STA_MFP);
 		else
@@ -1156,6 +1120,7 @@
 		set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    !sdata->u.mgd.tdls_wider_bw_prohibited &&
 	    ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
 	    params->ext_capab_len >= 8 &&
 	    params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
@@ -1212,7 +1177,8 @@
 		sta_apply_mesh_params(local, sta, params);
 
 	/* set the STA state after all sta info from usermode has been set */
-	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) ||
+	    set & BIT(NL80211_STA_FLAG_ASSOCIATED)) {
 		ret = sta_apply_auth_flags(local, sta, mask, set);
 		if (ret)
 			return ret;
@@ -1254,12 +1220,14 @@
 	 * defaults -- if userspace wants something else we'll
 	 * change it accordingly in sta_apply_parameters()
 	 */
-	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
+	if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+	    !(params->sta_flags_set & (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+					BIT(NL80211_STA_FLAG_ASSOCIATED)))) {
 		sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 		sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
-	} else {
-		sta->sta.tdls = true;
 	}
+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+		sta->sta.tdls = true;
 
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
@@ -1268,10 +1236,12 @@
 	}
 
 	/*
-	 * for TDLS, rate control should be initialized only when
-	 * rates are known and station is marked authorized
+	 * for TDLS and for unassociated station, rate control should be
+	 * initialized only when rates are known and station is marked
+	 * authorized/associated
 	 */
-	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    test_sta_flag(sta, WLAN_STA_ASSOC))
 		rate_control_rate_init(sta);
 
 	layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
@@ -1346,7 +1316,10 @@
 		break;
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
-		statype = CFG80211_STA_AP_CLIENT;
+		if (test_sta_flag(sta, WLAN_STA_ASSOC))
+			statype = CFG80211_STA_AP_CLIENT;
+		else
+			statype = CFG80211_STA_AP_CLIENT_UNASSOC;
 		break;
 	default:
 		err = -EOPNOTSUPP;
@@ -1415,7 +1388,7 @@
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
-		ieee80211_recalc_ps(local, -1);
+		ieee80211_recalc_ps(local);
 		ieee80211_recalc_ps_vif(sdata);
 	}
 
@@ -2450,7 +2423,7 @@
 	if (ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
 
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 	ieee80211_recalc_ps_vif(sdata);
 
 	return 0;
@@ -3522,18 +3495,32 @@
 					  u16 frame_type, bool reg)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
+	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
 
 	switch (frame_type) {
 	case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
-		if (reg)
+		if (reg) {
 			local->probe_req_reg++;
-		else
-			local->probe_req_reg--;
+			sdata->vif.probe_req_reg++;
+		} else {
+			if (local->probe_req_reg)
+				local->probe_req_reg--;
+
+			if (sdata->vif.probe_req_reg)
+				sdata->vif.probe_req_reg--;
+		}
 
 		if (!local->open_count)
 			break;
 
-		ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+		if (sdata->vif.probe_req_reg == 1)
+			drv_config_iface_filter(local, sdata, FIF_PROBE_REQ,
+						FIF_PROBE_REQ);
+		else if (sdata->vif.probe_req_reg == 0)
+			drv_config_iface_filter(local, sdata, 0,
+						FIF_PROBE_REQ);
+
+		ieee80211_configure_filter(local);
 		break;
 	default:
 		break;
diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
deleted file mode 100644
index 2d51f62..0000000
--- a/net/mac80211/cfg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * mac80211 configuration hooks for cfg80211
- */
-#ifndef __CFG_H
-#define __CFG_H
-
-extern const struct cfg80211_ops mac80211_config_ops;
-
-#endif /* __CFG_H */
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ced6bf3..4d2aaeb 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -123,6 +123,8 @@
 	FLAG(SUPPORTS_CLONED_SKBS),
 	FLAG(SINGLE_SCAN_ON_ALL_BANDS),
 	FLAG(TDLS_WIDER_BW),
+	FLAG(SUPPORTS_AMSDU_IN_AMPDU),
+	FLAG(BEACON_TX_STATUS),
 
 	/* keep last for the build bug below */
 	(void *)0x1
@@ -149,7 +151,7 @@
 
 	for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
 		if (test_bit(i, local->hw.flags))
-			pos += scnprintf(pos, end - pos, "%s",
+			pos += scnprintf(pos, end - pos, "%s\n",
 					 hw_flag_names[i]);
 	}
 
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 702ca12..7961e7d0 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -2,6 +2,7 @@
  * Copyright 2003-2005	Devicescape Software, Inc.
  * Copyright (c) 2006	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2015	Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -34,6 +35,14 @@
 	.llseek = generic_file_llseek,					\
 }
 
+#define KEY_OPS_W(name)							\
+static const struct file_operations key_ ##name## _ops = {		\
+	.read = key_##name##_read,					\
+	.write = key_##name##_write,					\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
 #define KEY_FILE(name, format)						\
 		 KEY_READ_##format(name)				\
 		 KEY_OPS(name)
@@ -74,6 +83,41 @@
 }
 KEY_OPS(algorithm);
 
+static ssize_t key_tx_spec_write(struct file *file, const char __user *userbuf,
+				 size_t count, loff_t *ppos)
+{
+	struct ieee80211_key *key = file->private_data;
+	u64 pn;
+	int ret;
+
+	switch (key->conf.cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		return -EINVAL;
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* not supported yet */
+		return -EOPNOTSUPP;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP_256:
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		ret = kstrtou64_from_user(userbuf, count, 16, &pn);
+		if (ret)
+			return ret;
+		/* PN is a 48-bit counter */
+		if (pn >= (1ULL << 48))
+			return -ERANGE;
+		atomic64_set(&key->conf.tx_pn, pn);
+		return count;
+	default:
+		return 0;
+	}
+}
+
 static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
 {
@@ -110,7 +154,7 @@
 	}
 	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
 }
-KEY_OPS(tx_spec);
+KEY_OPS_W(tx_spec);
 
 static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
 				size_t count, loff_t *ppos)
@@ -278,6 +322,9 @@
 #define DEBUGFS_ADD(name) \
 	debugfs_create_file(#name, 0400, key->debugfs.dir, \
 			    key, &key_##name##_ops);
+#define DEBUGFS_ADD_W(name) \
+	debugfs_create_file(#name, 0600, key->debugfs.dir, \
+			    key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 {
@@ -310,7 +357,7 @@
 	DEBUGFS_ADD(keyidx);
 	DEBUGFS_ADD(hw_key_idx);
 	DEBUGFS_ADD(algorithm);
-	DEBUGFS_ADD(tx_spec);
+	DEBUGFS_ADD_W(tx_spec);
 	DEBUGFS_ADD(rx_spec);
 	DEBUGFS_ADD(replays);
 	DEBUGFS_ADD(icverrors);
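The new write handler accepts the PN as hex and rejects anything outside
the 48-bit packet-number space of the CCMP/GCMP-family ciphers. A
standalone sketch of that validation, with strtoull standing in for
kstrtou64_from_user():

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int parse_pn(const char *buf, uint64_t *pn)
	{
		char *end;

		errno = 0;
		*pn = strtoull(buf, &end, 16);
		if (errno || end == buf)
			return -EINVAL;
		if (*pn >= (1ULL << 48))	/* PN is a 48-bit counter */
			return -ERANGE;
		return 0;
	}

	int main(void)
	{
		uint64_t pn;

		printf("max valid: %d\n", parse_pn("ffffffffffff", &pn));
		printf("too large: %d\n", parse_pn("1000000000000", &pn));
		return 0;
	}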
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 1021e87..37ea30e 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -114,14 +114,6 @@
 	return scnprintf(buf, buflen, "%pM\n", sdata->field);		\
 }
 
-#define IEEE80211_IF_FMT_DEC_DIV_16(name, field)			\
-static ssize_t ieee80211_if_fmt_##name(					\
-	const struct ieee80211_sub_if_data *sdata,			\
-	char *buf, int buflen)						\
-{									\
-	return scnprintf(buf, buflen, "%d\n", sdata->field / 16);	\
-}
-
 #define IEEE80211_IF_FMT_JIFFIES_TO_MS(name, field)			\
 static ssize_t ieee80211_if_fmt_##name(					\
 	const struct ieee80211_sub_if_data *sdata,			\
@@ -247,8 +239,6 @@
 /* STA attributes */
 IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
 IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
-IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
-IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
 IEEE80211_IF_FILE(beacon_timeout, u.mgd.beacon_timeout, JIFFIES_TO_MS);
 
 static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
@@ -455,6 +445,34 @@
 }
 IEEE80211_IF_FILE_RW(uapsd_max_sp_len);
 
+static ssize_t ieee80211_if_fmt_tdls_wider_bw(
+	const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+	const struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	bool tdls_wider_bw;
+
+	tdls_wider_bw = ieee80211_hw_check(&sdata->local->hw, TDLS_WIDER_BW) &&
+			!ifmgd->tdls_wider_bw_prohibited;
+
+	return snprintf(buf, buflen, "%d\n", tdls_wider_bw);
+}
+
+static ssize_t ieee80211_if_parse_tdls_wider_bw(
+	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	u8 val;
+	int ret;
+
+	ret = kstrtou8(buf, 0, &val);
+	if (ret)
+		return ret;
+
+	ifmgd->tdls_wider_bw_prohibited = !val;
+	return buflen;
+}
+IEEE80211_IF_FILE_RW(tdls_wider_bw);
+
 /* AP attributes */
 IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
 IEEE80211_IF_FILE(num_sta_ps, u.ap.ps.num_sta_ps, ATOMIC);
@@ -606,14 +624,13 @@
 {
 	DEBUGFS_ADD(bssid);
 	DEBUGFS_ADD(aid);
-	DEBUGFS_ADD(last_beacon);
-	DEBUGFS_ADD(ave_beacon);
 	DEBUGFS_ADD(beacon_timeout);
 	DEBUGFS_ADD_MODE(smps, 0600);
 	DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
 	DEBUGFS_ADD_MODE(beacon_loss, 0200);
 	DEBUGFS_ADD_MODE(uapsd_queues, 0600);
 	DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
+	DEBUGFS_ADD_MODE(tdls_wider_bw, 0600);
 }
 
 static void add_ap_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 06d5293..a39512f 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,6 @@
 		STA_OPS(name)
 
 STA_FILE(aid, sta.aid, D);
-STA_FILE(last_ack_signal, last_ack_signal, D);
 
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
 			      size_t count, loff_t *ppos)
@@ -366,11 +365,10 @@
 	DEBUGFS_ADD(agg_status);
 	DEBUGFS_ADD(ht_capa);
 	DEBUGFS_ADD(vht_capa);
-	DEBUGFS_ADD(last_ack_signal);
 
-	DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
-	DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
-	DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
+	DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates);
+	DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
+	DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
 
 	if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
 		debugfs_create_x32("driver_buffered_tids", 0400,
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 267c3b1..a1d5431 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -8,6 +8,60 @@
 #include "trace.h"
 #include "driver-ops.h"
 
+int drv_add_interface(struct ieee80211_local *local,
+		      struct ieee80211_sub_if_data *sdata)
+{
+	int ret;
+
+	might_sleep();
+
+	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+		     !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
+		     !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
+		return -EINVAL;
+
+	trace_drv_add_interface(local, sdata);
+	ret = local->ops->add_interface(&local->hw, &sdata->vif);
+	trace_drv_return_int(local, ret);
+
+	if (ret == 0)
+		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
+
+	return ret;
+}
+
+int drv_change_interface(struct ieee80211_local *local,
+			 struct ieee80211_sub_if_data *sdata,
+			 enum nl80211_iftype type, bool p2p)
+{
+	int ret;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_change_interface(local, sdata, type, p2p);
+	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+void drv_remove_interface(struct ieee80211_local *local,
+			  struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_remove_interface(local, sdata);
+	local->ops->remove_interface(&local->hw, &sdata->vif);
+	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
+	trace_drv_return_void(local);
+}
+
 __must_check
 int drv_sta_state(struct ieee80211_local *local,
 		  struct ieee80211_sub_if_data *sdata,
@@ -39,3 +93,171 @@
 	trace_drv_return_int(local, ret);
 	return ret;
 }
+
+void drv_sta_rc_update(struct ieee80211_local *local,
+		       struct ieee80211_sub_if_data *sdata,
+		       struct ieee80211_sta *sta, u32 changed)
+{
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
+		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+		 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
+
+	trace_drv_sta_rc_update(local, sdata, sta, changed);
+	if (local->ops->sta_rc_update)
+		local->ops->sta_rc_update(&local->hw, &sdata->vif,
+					  sta, changed);
+
+	trace_drv_return_void(local);
+}
+
+int drv_conf_tx(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata, u16 ac,
+		const struct ieee80211_tx_queue_params *params)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	if (WARN_ONCE(params->cw_min == 0 ||
+		      params->cw_min > params->cw_max,
+		      "%s: invalid CW_min/CW_max: %d/%d\n",
+		      sdata->name, params->cw_min, params->cw_max))
+		return -EINVAL;
+
+	trace_drv_conf_tx(local, sdata, ac, params);
+	if (local->ops->conf_tx)
+		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+					  ac, params);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+u64 drv_get_tsf(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata)
+{
+	u64 ret = -1ULL;
+
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return ret;
+
+	trace_drv_get_tsf(local, sdata);
+	if (local->ops->get_tsf)
+		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
+	trace_drv_return_u64(local, ret);
+	return ret;
+}
+
+void drv_set_tsf(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u64 tsf)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_set_tsf(local, sdata, tsf);
+	if (local->ops->set_tsf)
+		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
+	trace_drv_return_void(local);
+}
+
+void drv_reset_tsf(struct ieee80211_local *local,
+		   struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	if (!check_sdata_in_driver(sdata))
+		return;
+
+	trace_drv_reset_tsf(local, sdata);
+	if (local->ops->reset_tsf)
+		local->ops->reset_tsf(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
+
+int drv_switch_vif_chanctx(struct ieee80211_local *local,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs, enum ieee80211_chanctx_switch_mode mode)
+{
+	int ret = 0;
+	int i;
+
+	if (!local->ops->switch_vif_chanctx)
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < n_vifs; i++) {
+		struct ieee80211_chanctx *new_ctx =
+			container_of(vifs[i].new_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+		struct ieee80211_chanctx *old_ctx =
+			container_of(vifs[i].old_ctx,
+				     struct ieee80211_chanctx,
+				     conf);
+
+		WARN_ON_ONCE(!old_ctx->driver_present);
+		WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
+			      new_ctx->driver_present) ||
+			     (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
+			      !new_ctx->driver_present));
+	}
+
+	trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
+	ret = local->ops->switch_vif_chanctx(&local->hw,
+					     vifs, n_vifs, mode);
+	trace_drv_return_int(local, ret);
+
+	if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+		for (i = 0; i < n_vifs; i++) {
+			struct ieee80211_chanctx *new_ctx =
+				container_of(vifs[i].new_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+			struct ieee80211_chanctx *old_ctx =
+				container_of(vifs[i].old_ctx,
+					     struct ieee80211_chanctx,
+					     conf);
+
+			new_ctx->driver_present = true;
+			old_ctx->driver_present = false;
+		}
+	}
+
+	return ret;
+}
+
+int drv_ampdu_action(struct ieee80211_local *local,
+		     struct ieee80211_sub_if_data *sdata,
+		     enum ieee80211_ampdu_mlme_action action,
+		     struct ieee80211_sta *sta, u16 tid,
+		     u16 *ssn, u8 buf_size, bool amsdu)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_ampdu_action(local, sdata, action, sta, tid,
+			       ssn, buf_size, amsdu);
+
+	if (local->ops->ampdu_action)
+		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
+					       sta, tid, ssn, buf_size, amsdu);
+
+	trace_drv_return_int(local, ret);
+
+	return ret;
+}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 02d9133..3098709 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -137,59 +137,15 @@
 }
 #endif
 
-static inline int drv_add_interface(struct ieee80211_local *local,
-				    struct ieee80211_sub_if_data *sdata)
-{
-	int ret;
+int drv_add_interface(struct ieee80211_local *local,
+		      struct ieee80211_sub_if_data *sdata);
 
-	might_sleep();
+int drv_change_interface(struct ieee80211_local *local,
+			 struct ieee80211_sub_if_data *sdata,
+			 enum nl80211_iftype type, bool p2p);
 
-	if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-		    (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
-		     !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
-		     !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
-		return -EINVAL;
-
-	trace_drv_add_interface(local, sdata);
-	ret = local->ops->add_interface(&local->hw, &sdata->vif);
-	trace_drv_return_int(local, ret);
-
-	if (ret == 0)
-		sdata->flags |= IEEE80211_SDATA_IN_DRIVER;
-
-	return ret;
-}
-
-static inline int drv_change_interface(struct ieee80211_local *local,
-				       struct ieee80211_sub_if_data *sdata,
-				       enum nl80211_iftype type, bool p2p)
-{
-	int ret;
-
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_change_interface(local, sdata, type, p2p);
-	ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p);
-	trace_drv_return_int(local, ret);
-	return ret;
-}
-
-static inline void drv_remove_interface(struct ieee80211_local *local,
-					struct ieee80211_sub_if_data *sdata)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_remove_interface(local, sdata);
-	local->ops->remove_interface(&local->hw, &sdata->vif);
-	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
-	trace_drv_return_void(local);
-}
+void drv_remove_interface(struct ieee80211_local *local,
+			  struct ieee80211_sub_if_data *sdata);
 
 static inline int drv_config(struct ieee80211_local *local, u32 changed)
 {
@@ -260,6 +216,22 @@
 	trace_drv_return_void(local);
 }
 
+static inline void drv_config_iface_filter(struct ieee80211_local *local,
+					   struct ieee80211_sub_if_data *sdata,
+					   unsigned int filter_flags,
+					   unsigned int changed_flags)
+{
+	might_sleep();
+
+	trace_drv_config_iface_filter(local, sdata, filter_flags,
+				      changed_flags);
+	if (local->ops->config_iface_filter)
+		local->ops->config_iface_filter(&local->hw, &sdata->vif,
+						filter_flags,
+						changed_flags);
+	trace_drv_return_void(local);
+}
+
 static inline int drv_set_tim(struct ieee80211_local *local,
 			      struct ieee80211_sta *sta, bool set)
 {
@@ -580,25 +552,9 @@
 		  enum ieee80211_sta_state old_state,
 		  enum ieee80211_sta_state new_state);
 
-static inline void drv_sta_rc_update(struct ieee80211_local *local,
-				     struct ieee80211_sub_if_data *sdata,
-				     struct ieee80211_sta *sta, u32 changed)
-{
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	WARN_ON(changed & IEEE80211_RC_SUPP_RATES_CHANGED &&
-		(sdata->vif.type != NL80211_IFTYPE_ADHOC &&
-		 sdata->vif.type != NL80211_IFTYPE_MESH_POINT));
-
-	trace_drv_sta_rc_update(local, sdata, sta, changed);
-	if (local->ops->sta_rc_update)
-		local->ops->sta_rc_update(&local->hw, &sdata->vif,
-					  sta, changed);
-
-	trace_drv_return_void(local);
-}
+void drv_sta_rc_update(struct ieee80211_local *local,
+		       struct ieee80211_sub_if_data *sdata,
+		       struct ieee80211_sta *sta, u32 changed);
 
 static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local,
 					   struct ieee80211_sub_if_data *sdata,
@@ -630,76 +586,17 @@
 	trace_drv_return_void(local);
 }
 
-static inline int drv_conf_tx(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata, u16 ac,
-			      const struct ieee80211_tx_queue_params *params)
-{
-	int ret = -EOPNOTSUPP;
+int drv_conf_tx(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata, u16 ac,
+		const struct ieee80211_tx_queue_params *params);
 
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	if (WARN_ONCE(params->cw_min == 0 ||
-		      params->cw_min > params->cw_max,
-		      "%s: invalid CW_min/CW_max: %d/%d\n",
-		      sdata->name, params->cw_min, params->cw_max))
-		return -EINVAL;
-
-	trace_drv_conf_tx(local, sdata, ac, params);
-	if (local->ops->conf_tx)
-		ret = local->ops->conf_tx(&local->hw, &sdata->vif,
-					  ac, params);
-	trace_drv_return_int(local, ret);
-	return ret;
-}
-
-static inline u64 drv_get_tsf(struct ieee80211_local *local,
-			      struct ieee80211_sub_if_data *sdata)
-{
-	u64 ret = -1ULL;
-
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return ret;
-
-	trace_drv_get_tsf(local, sdata);
-	if (local->ops->get_tsf)
-		ret = local->ops->get_tsf(&local->hw, &sdata->vif);
-	trace_drv_return_u64(local, ret);
-	return ret;
-}
-
-static inline void drv_set_tsf(struct ieee80211_local *local,
-			       struct ieee80211_sub_if_data *sdata,
-			       u64 tsf)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_set_tsf(local, sdata, tsf);
-	if (local->ops->set_tsf)
-		local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
-	trace_drv_return_void(local);
-}
-
-static inline void drv_reset_tsf(struct ieee80211_local *local,
-				 struct ieee80211_sub_if_data *sdata)
-{
-	might_sleep();
-
-	if (!check_sdata_in_driver(sdata))
-		return;
-
-	trace_drv_reset_tsf(local, sdata);
-	if (local->ops->reset_tsf)
-		local->ops->reset_tsf(&local->hw, &sdata->vif);
-	trace_drv_return_void(local);
-}
+u64 drv_get_tsf(struct ieee80211_local *local,
+		struct ieee80211_sub_if_data *sdata);
+void drv_set_tsf(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 u64 tsf);
+void drv_reset_tsf(struct ieee80211_local *local,
+		   struct ieee80211_sub_if_data *sdata);
 
 static inline int drv_tx_last_beacon(struct ieee80211_local *local)
 {
@@ -714,30 +611,11 @@
 	return ret;
 }
 
-static inline int drv_ampdu_action(struct ieee80211_local *local,
-				   struct ieee80211_sub_if_data *sdata,
-				   enum ieee80211_ampdu_mlme_action action,
-				   struct ieee80211_sta *sta, u16 tid,
-				   u16 *ssn, u8 buf_size)
-{
-	int ret = -EOPNOTSUPP;
-
-	might_sleep();
-
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
-
-	if (local->ops->ampdu_action)
-		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-					       sta, tid, ssn, buf_size);
-
-	trace_drv_return_int(local, ret);
-
-	return ret;
-}
+int drv_ampdu_action(struct ieee80211_local *local,
+		     struct ieee80211_sub_if_data *sdata,
+		     enum ieee80211_ampdu_mlme_action action,
+		     struct ieee80211_sta *sta, u16 tid,
+		     u16 *ssn, u8 buf_size, bool amsdu);
 
 static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 				struct survey_info *survey)
@@ -1066,58 +944,9 @@
 	trace_drv_return_void(local);
 }
 
-static inline int
-drv_switch_vif_chanctx(struct ieee80211_local *local,
-		       struct ieee80211_vif_chanctx_switch *vifs,
-		       int n_vifs,
-		       enum ieee80211_chanctx_switch_mode mode)
-{
-	int ret = 0;
-	int i;
-
-	if (!local->ops->switch_vif_chanctx)
-		return -EOPNOTSUPP;
-
-	for (i = 0; i < n_vifs; i++) {
-		struct ieee80211_chanctx *new_ctx =
-			container_of(vifs[i].new_ctx,
-				     struct ieee80211_chanctx,
-				     conf);
-		struct ieee80211_chanctx *old_ctx =
-			container_of(vifs[i].old_ctx,
-				     struct ieee80211_chanctx,
-				     conf);
-
-		WARN_ON_ONCE(!old_ctx->driver_present);
-		WARN_ON_ONCE((mode == CHANCTX_SWMODE_SWAP_CONTEXTS &&
-			      new_ctx->driver_present) ||
-			     (mode == CHANCTX_SWMODE_REASSIGN_VIF &&
-			      !new_ctx->driver_present));
-	}
-
-	trace_drv_switch_vif_chanctx(local, vifs, n_vifs, mode);
-	ret = local->ops->switch_vif_chanctx(&local->hw,
-					     vifs, n_vifs, mode);
-	trace_drv_return_int(local, ret);
-
-	if (!ret && mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
-		for (i = 0; i < n_vifs; i++) {
-			struct ieee80211_chanctx *new_ctx =
-				container_of(vifs[i].new_ctx,
-					     struct ieee80211_chanctx,
-					     conf);
-			struct ieee80211_chanctx *old_ctx =
-				container_of(vifs[i].old_ctx,
-					     struct ieee80211_chanctx,
-					     conf);
-
-			new_ctx->driver_present = true;
-			old_ctx->driver_present = false;
-		}
-	}
-
-	return ret;
-}
+int drv_switch_vif_chanctx(struct ieee80211_local *local,
+			   struct ieee80211_vif_chanctx_switch *vifs,
+			   int n_vifs, enum ieee80211_chanctx_switch_mode mode);
 
 static inline int drv_start_ap(struct ieee80211_local *local,
 			       struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 188faab..9cc986d 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -40,7 +40,7 @@
 	"rx_duplicates", "rx_fragments", "rx_dropped",
 	"tx_packets", "tx_bytes",
 	"tx_filtered", "tx_retry_failed", "tx_retries",
-	"beacon_loss", "sta_state", "txrate", "rxrate", "signal",
+	"sta_state", "txrate", "rxrate", "signal",
 	"channel", "noise", "ch_time", "ch_time_busy",
 	"ch_time_ext_busy", "ch_time_rx", "ch_time_tx"
 };
@@ -77,20 +77,19 @@
 
 	memset(data, 0, sizeof(u64) * STA_STATS_LEN);
 
-#define ADD_STA_STATS(sta)				\
-	do {						\
-		data[i++] += sta->rx_packets;		\
-		data[i++] += sta->rx_bytes;		\
-		data[i++] += sta->num_duplicates;	\
-		data[i++] += sta->rx_fragments;		\
-		data[i++] += sta->rx_dropped;		\
-							\
-		data[i++] += sinfo.tx_packets;		\
-		data[i++] += sinfo.tx_bytes;		\
-		data[i++] += sta->tx_filtered_count;	\
-		data[i++] += sta->tx_retry_failed;	\
-		data[i++] += sta->tx_retry_count;	\
-		data[i++] += sta->beacon_loss_count;	\
+#define ADD_STA_STATS(sta)					\
+	do {							\
+		data[i++] += sta->rx_stats.packets;		\
+		data[i++] += sta->rx_stats.bytes;		\
+		data[i++] += sta->rx_stats.num_duplicates;	\
+		data[i++] += sta->rx_stats.fragments;		\
+		data[i++] += sta->rx_stats.dropped;		\
+								\
+		data[i++] += sinfo.tx_packets;			\
+		data[i++] += sinfo.tx_bytes;			\
+		data[i++] += sta->status_stats.filtered;	\
+		data[i++] += sta->status_stats.retry_failed;	\
+		data[i++] += sta->status_stats.retry_count;	\
 	} while (0)
 
 	/* For Managed stations, find the single station based on BSSID
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
deleted file mode 100644
index 01ae759..0000000
--- a/net/mac80211/event.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * mac80211 - events
- */
-#include <net/cfg80211.h>
-#include "ieee80211_i.h"
-
-/*
- * Indicate a failed Michael MIC to userspace. If the caller knows the TSC of
- * the frame that generated the MIC failure (i.e., if it was provided by the
- * driver or is still in the frame), it should provide that information.
- */
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
-				     struct ieee80211_hdr *hdr, const u8 *tsc,
-				     gfp_t gfp)
-{
-	cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
-				     (hdr->addr1[0] & 0x01) ?
-				     NL80211_KEYTYPE_GROUP :
-				     NL80211_KEYTYPE_PAIRWISE,
-				     keyidx, tsc, gfp);
-}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 7f72bc9..2001555 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -229,7 +229,7 @@
 	struct cfg80211_chan_def chandef;
 	struct ieee80211_channel *chan;
 	struct beacon_data *presp;
-	enum nl80211_bss_scan_width scan_width;
+	struct cfg80211_inform_bss bss_meta = {};
 	bool have_higher_than_11mbit;
 	bool radar_required;
 	int err;
@@ -383,10 +383,11 @@
 	mod_timer(&ifibss->timer,
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
-	scan_width = cfg80211_chandef_to_scan_width(&chandef);
-	bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
-					      scan_width, mgmt,
-					      presp->head_len, 0, GFP_KERNEL);
+	bss_meta.chan = chan;
+	bss_meta.scan_width = cfg80211_chandef_to_scan_width(&chandef);
+	bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt,
+					     presp->head_len, GFP_KERNEL);
+
 	cfg80211_put_bss(local->hw.wiphy, bss);
 	netif_carrier_on(sdata->dev);
 	cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
@@ -646,7 +647,7 @@
 		return NULL;
 	}
 
-	sta->last_rx = jiffies;
+	sta->rx_stats.last_rx = jiffies;
 
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
@@ -668,7 +669,8 @@
 
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sta->sdata == sdata &&
-		    time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+		    time_after(sta->rx_stats.last_rx +
+			       IEEE80211_IBSS_MERGE_INTERVAL,
 			       jiffies)) {
 			active++;
 			break;
@@ -1234,7 +1236,7 @@
 	if (!sta)
 		return;
 
-	sta->last_rx = jiffies;
+	sta->rx_stats.last_rx = jiffies;
 
 	/* make sure mandatory rates are always added */
 	sband = local->hw.wiphy->bands[band];
@@ -1252,7 +1254,7 @@
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta, *tmp;
 	unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
-	unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+	unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
 
 	mutex_lock(&local->sta_mtx);
 
@@ -1260,8 +1262,8 @@
 		if (sdata != sta->sdata)
 			continue;
 
-		if (time_after(jiffies, sta->last_rx + exp_time) ||
-		    (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+		if (time_after(jiffies, sta->rx_stats.last_rx + exp_time) ||
+		    (time_after(jiffies, sta->rx_stats.last_rx + exp_rsn) &&
 		     sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
 			sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
 				sta->sta_state != IEEE80211_STA_AUTHORIZED ?
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6e52659..62f2a97 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -34,6 +34,8 @@
 #include "sta_info.h"
 #include "debug.h"
 
+extern const struct cfg80211_ops mac80211_config_ops;
+
 struct ieee80211_local;
 
 /* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -419,6 +421,8 @@
 	bool downgraded;
 };
 
+DECLARE_EWMA(beacon_signal, 16, 4)
+
 struct ieee80211_if_managed {
 	struct timer_list timer;
 	struct timer_list conn_mon_timer;
@@ -490,16 +494,7 @@
 
 	s16 p2p_noa_index;
 
-	/* Signal strength from the last Beacon frame in the current BSS. */
-	int last_beacon_signal;
-
-	/*
-	 * Weighted average of the signal strength from Beacon frames in the
-	 * current BSS. This is in units of 1/16 of the signal unit to maintain
-	 * accuracy and to speed up calculations, i.e., the value need to be
-	 * divided by 16 to get the actual value.
-	 */
-	int ave_beacon_signal;
+	struct ewma_beacon_signal ave_beacon_signal;
 
 	/*
 	 * Number of Beacon frames used in ave_beacon_signal. This can be used
@@ -508,6 +503,9 @@
 	 */
 	unsigned int count_beacon_signal;
 
+	/* Number of times beacon loss was invoked. */
+	unsigned int beacon_loss_count;
+
 	/*
 	 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
 	 * that triggered a cqm event. 0 indicates that no event has been
@@ -535,6 +533,7 @@
 	struct sk_buff *teardown_skb; /* A copy to send through the AP */
 	spinlock_t teardown_lock; /* To lock changing teardown_skb */
 	bool tdls_chan_switch_prohibited;
+	bool tdls_wider_bw_prohibited;
 
 	/* WMM-AC TSPEC support */
 	struct ieee80211_sta_tx_tspec tx_tspec[IEEE80211_NUM_ACS];
@@ -1311,7 +1310,6 @@
 	struct work_struct dynamic_ps_enable_work;
 	struct work_struct dynamic_ps_disable_work;
 	struct timer_list dynamic_ps_timer;
-	struct notifier_block network_latency_notifier;
 	struct notifier_block ifa_notifier;
 	struct notifier_block ifa6_notifier;
 
@@ -1497,10 +1495,8 @@
 			   struct cfg80211_disassoc_request *req);
 void ieee80211_send_pspoll(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
+void ieee80211_recalc_ps(struct ieee80211_local *local);
 void ieee80211_recalc_ps_vif(struct ieee80211_sub_if_data *sdata);
-int ieee80211_max_network_latency(struct notifier_block *nb,
-				  unsigned long data, void *dummy);
 int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
@@ -1641,6 +1637,9 @@
 struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags);
+void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_supported_band *sband,
+			  int retry_count, int shift, bool send_to_cooked);
 
 void ieee80211_check_fast_xmit(struct sta_info *sta);
 void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
@@ -1769,9 +1768,6 @@
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 			     int rate, int erp, int short_preamble,
 			     int shift);
-void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
-				     struct ieee80211_hdr *hdr, const u8 *tsc,
-				     gfp_t gfp);
 void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
 			       bool bss_notify);
 void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
@@ -1853,7 +1849,7 @@
 void ieee80211_dynamic_ps_timer(unsigned long data);
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
-			     int powersave);
+			     bool powersave);
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
 			     struct ieee80211_hdr *hdr);
 void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
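
DECLARE_EWMA(beacon_signal, 16, 4) above comes from include/linux/average.h
and generates struct ewma_beacon_signal together with inline
ewma_beacon_signal_init/_add/_read helpers for a fixed-point exponentially
weighted moving average (precision factor 16, reciprocal weight 4, so a new
sample counts roughly 1/4). A usage sketch under those semantics; the signal
is stored negated so the unsigned average stays positive:

	#include <linux/average.h>

	DECLARE_EWMA(beacon_signal, 16, 4)

	static void track_beacon(struct ewma_beacon_signal *avg, int sig_dbm)
	{
		/* e.g. -55 dBm is stored as 55 */
		ewma_beacon_signal_add(avg, -sig_dbm);
	}

	static int beacon_avg_dbm(struct ewma_beacon_signal *avg)
	{
		/* negate again to get back to dBm */
		return -(int)ewma_beacon_signal_read(avg);
	}
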
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 6964fc6..f848c75 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -709,7 +709,7 @@
 	if (hw_reconf_flags)
 		ieee80211_hw_config(local, hw_reconf_flags);
 
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 
 	if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
 	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -1016,7 +1016,7 @@
 			drv_remove_interface(local, sdata);
 	}
 
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 
 	if (cancel_scan)
 		flush_delayed_work(&local->scan_work);
@@ -1204,7 +1204,7 @@
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
-	if (local->scanning)
+	if (test_bit(SCAN_SW_SCANNING, &local->scanning))
 		return;
 
 	if (!ieee80211_can_run_worker(local))
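
The iface.c hunk just above narrows the worker bail-out from "any scan in
progress" to software scans only: local->scanning is a bitmap, and testing
the whole word as a boolean also fired for hardware and on-channel scans.
Schematically, with the bit names from ieee80211_i.h:

	unsigned long scanning = 0;
	bool skip;

	set_bit(SCAN_HW_SCANNING, &scanning);	/* hardware scan running */

	skip = scanning != 0;				/* old check: true */
	skip = test_bit(SCAN_SW_SCANNING, &scanning);	/* new check: false */
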
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index ff79a13..273c96d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,7 +20,6 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/bitmap.h>
-#include <linux/pm_qos.h>
 #include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
@@ -32,7 +31,6 @@
 #include "mesh.h"
 #include "wep.h"
 #include "led.h"
-#include "cfg.h"
 #include "debugfs.h"
 
 void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -543,7 +541,8 @@
 			   NL80211_FEATURE_HT_IBSS |
 			   NL80211_FEATURE_VIF_TXPOWER |
 			   NL80211_FEATURE_MAC_ON_CREATE |
-			   NL80211_FEATURE_USERSPACE_MPM;
+			   NL80211_FEATURE_USERSPACE_MPM |
+			   NL80211_FEATURE_FULL_AP_CLIENT_STATE;
 
 	if (!ops->hw_scan)
 		wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1082,13 +1081,6 @@
 
 	rtnl_unlock();
 
-	local->network_latency_notifier.notifier_call =
-		ieee80211_max_network_latency;
-	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
-				     &local->network_latency_notifier);
-	if (result)
-		goto fail_pm_qos;
-
 #ifdef CONFIG_INET
 	local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
 	result = register_inetaddr_notifier(&local->ifa_notifier);
@@ -1113,10 +1105,7 @@
 #endif
 #if defined(CONFIG_INET) || defined(CONFIG_IPV6)
  fail_ifa:
-	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
-			       &local->network_latency_notifier);
 #endif
- fail_pm_qos:
 	rtnl_lock();
 	rate_control_deinitialize(local);
 	ieee80211_remove_interfaces(local);
@@ -1142,8 +1131,6 @@
 	tasklet_kill(&local->tx_pending_tasklet);
 	tasklet_kill(&local->tasklet);
 
-	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
-			       &local->network_latency_notifier);
 #ifdef CONFIG_INET
 	unregister_inetaddr_notifier(&local->ifa_notifier);
 #endif
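
The PM-QoS notifier was the step guarded by the fail_pm_qos label, so its
removal also drops one rung of the error-unwind ladder in
ieee80211_register_hw(). The ladder pattern, reduced to its shape (the names
here are illustrative, not the real ones):

	err = register_a();
	if (err)
		goto fail_a;
	err = register_b();
	if (err)
		goto fail_b;	/* undo everything registered before b */
	return 0;

 fail_b:
	unregister_a();
 fail_a:
	return err;
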
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e06a5ca..626e8de 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -94,6 +94,9 @@
 	ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
 				     ie->ht_operation, &sta_chan_def);
 
+	ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
+				      ie->vht_operation, &sta_chan_def);
+
 	if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
 					 &sta_chan_def))
 		return false;
@@ -436,8 +439,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_channel *channel;
-	enum nl80211_channel_type channel_type =
-		cfg80211_get_chandef_type(&sdata->vif.bss_conf.chandef);
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_sta_ht_cap *ht_cap;
 	u8 *pos;
@@ -454,7 +455,10 @@
 	sband = local->hw.wiphy->bands[channel->band];
 	ht_cap = &sband->ht_cap;
 
-	if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
+	if (!ht_cap->ht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
 		return 0;
 
 	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
@@ -467,6 +471,68 @@
 	return 0;
 }
 
+int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_supported_band *sband;
+	u8 *pos;
+
+	sband = local->hw.wiphy->bands[band];
+	if (!sband->vht_cap.vht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
+	ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap);
+
+	return 0;
+}
+
+int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
+			 struct sk_buff *skb)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *channel;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_sta_vht_cap *vht_cap;
+	u8 *pos;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	channel = chanctx_conf->def.chan;
+	rcu_read_unlock();
+
+	sband = local->hw.wiphy->bands[channel->band];
+	vht_cap = &sband->vht_cap;
+
+	if (!vht_cap->vht_supported ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_5 ||
+	    sdata->vif.bss_conf.chandef.width == NL80211_CHAN_WIDTH_10)
+		return 0;
+
+	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_operation))
+		return -ENOMEM;
+
+	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
+	ieee80211_ie_build_vht_oper(pos, vht_cap,
+				    &sdata->vif.bss_conf.chandef);
+
+	return 0;
+}
+
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
@@ -540,9 +606,9 @@
  *
  * Return the header length.
  */
-int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
-			      struct ieee80211s_hdr *meshhdr,
-			      const char *addr4or5, const char *addr6)
+unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211s_hdr *meshhdr,
+				       const char *addr4or5, const char *addr6)
 {
 	if (WARN_ON(!addr4or5 && addr6))
 		return 0;
@@ -637,6 +703,8 @@
 		   2 + ifmsh->mesh_id_len +
 		   2 + sizeof(struct ieee80211_meshconf_ie) +
 		   2 + sizeof(__le16) + /* awake window */
+		   2 + sizeof(struct ieee80211_vht_cap) +
+		   2 + sizeof(struct ieee80211_vht_operation) +
 		   ifmsh->ie_len;
 
 	bcn = kzalloc(sizeof(*bcn) + head_len + tail_len, GFP_KERNEL);
@@ -718,6 +786,8 @@
 	    mesh_add_meshid_ie(sdata, skb) ||
 	    mesh_add_meshconf_ie(sdata, skb) ||
 	    mesh_add_awake_window_ie(sdata, skb) ||
+	    mesh_add_vht_cap_ie(sdata, skb) ||
+	    mesh_add_vht_oper_ie(sdata, skb) ||
 	    mesh_add_vendor_ies(sdata, skb))
 		goto out_free;
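
The beacon head/tail length computation above grows by the same
"2 + sizeof(...)" amounts that the new helpers later append, keeping the
allocation and the appends in step: every information element costs two
octets (Element ID and Length) plus its fixed payload. The core of the
pattern, as in mesh_add_vht_cap_ie():

	/* 2 = Element ID + Length octets preceding the payload */
	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
		return -ENOMEM;

	pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
	ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap);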
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 50c8473..a159634 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -207,9 +207,9 @@
 /* Various */
 int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
 				  const u8 *da, const u8 *sa);
-int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
-			      struct ieee80211s_hdr *meshhdr,
-			      const char *addr4or5, const char *addr6);
+unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata,
+				       struct ieee80211s_hdr *meshhdr,
+				       const char *addr4or5, const char *addr6);
 int mesh_rmc_check(struct ieee80211_sub_if_data *sdata,
 		   const u8 *addr, struct ieee80211s_hdr *mesh_hdr);
 bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
@@ -227,6 +227,10 @@
 		       struct sk_buff *skb);
 int mesh_add_ht_oper_ie(struct ieee80211_sub_if_data *sdata,
 			struct sk_buff *skb);
+int mesh_add_vht_cap_ie(struct ieee80211_sub_if_data *sdata,
+			struct sk_buff *skb);
+int mesh_add_vht_oper_ie(struct ieee80211_sub_if_data *sdata,
+			 struct sk_buff *skb);
 void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d80e0a4..c6be0b4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -329,7 +329,7 @@
 	if (sta->mesh->fail_avg >= 100)
 		return MAX_METRIC;
 
-	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
+	sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo);
 	rate = cfg80211_calculate_bitrate(&rinfo);
 	if (WARN_ON(!rate))
 		return MAX_METRIC;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5838464..c1f8892 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -60,7 +60,9 @@
 {
 	s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
 	return rssi_threshold == 0 ||
-	       (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
+	       (sta &&
+		(s8)-ewma_signal_read(&sta->rx_stats.avg_signal) >
+						rssi_threshold);
 }
 
 /**
@@ -226,6 +228,8 @@
 			    2 + sizeof(struct ieee80211_meshconf_ie) +
 			    2 + sizeof(struct ieee80211_ht_cap) +
 			    2 + sizeof(struct ieee80211_ht_operation) +
+			    2 + sizeof(struct ieee80211_vht_cap) +
+			    2 + sizeof(struct ieee80211_vht_operation) +
 			    2 + 8 + /* peering IE */
 			    sdata->u.mesh.ie_len);
 	if (!skb)
@@ -306,7 +310,9 @@
 
 	if (action != WLAN_SP_MESH_PEERING_CLOSE) {
 		if (mesh_add_ht_cap_ie(sdata, skb) ||
-		    mesh_add_ht_oper_ie(sdata, skb))
+		    mesh_add_ht_oper_ie(sdata, skb) ||
+		    mesh_add_vht_cap_ie(sdata, skb) ||
+		    mesh_add_vht_oper_ie(sdata, skb))
 			goto free;
 	}
 
@@ -386,7 +392,7 @@
 	rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
 	spin_lock_bh(&sta->mesh->plink_lock);
-	sta->last_rx = jiffies;
+	sta->rx_stats.last_rx = jiffies;
 
 	/* rates and capabilities don't change during peering */
 	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
@@ -402,6 +408,9 @@
 					      elems->ht_cap_elem, sta))
 		changed |= IEEE80211_RC_BW_CHANGED;
 
+	ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
+					    elems->vht_cap_elem, sta);
+
 	if (bw != sta->sta.bandwidth)
 		changed |= IEEE80211_RC_BW_CHANGED;
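
The reworked rssi_threshold_check() relies on the convention (introduced by
the sta_info.h changes below) that avg_signal stores the negated signal, so
the double negation recovers dBm before the comparison. A worked example
with the casts spelled out:

	s32 rssi_threshold = -60;	/* from the mesh config */
	unsigned long avg = 55;		/* EWMA of -(-55 dBm) */
	bool ok;

	/* (s8)-avg truncates back to -55; -55 > -60, so peering is allowed */
	ok = (s8)-avg > rssi_threshold;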
 
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cd7e55e..ded4b97 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -20,7 +20,6 @@
 #include <linux/etherdevice.h>
 #include <linux/moduleparam.h>
 #include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
 #include <linux/crc32.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -82,13 +81,6 @@
 		 " before disconnecting (reason 4).");
 
 /*
- * Weight given to the latest Beacon frame when calculating average signal
- * strength for Beacon frames received in the current BSS. This must be
- * between 1 and 15.
- */
-#define IEEE80211_SIGNAL_AVE_WEIGHT	3
-
-/*
  * How many Beacon frames need to have been used in average signal strength
  * before starting to indicate signal change events.
  */
@@ -943,7 +935,7 @@
 
 void ieee80211_send_nullfunc(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *sdata,
-			     int powersave)
+			     bool powersave)
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr_3addr *nullfunc;
@@ -1427,7 +1419,7 @@
 			  msecs_to_jiffies(conf->dynamic_ps_timeout));
 	} else {
 		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
-			ieee80211_send_nullfunc(local, sdata, 1);
+			ieee80211_send_nullfunc(local, sdata, true);
 
 		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
 		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
@@ -1483,7 +1475,7 @@
 }
 
 /* need to hold RTNL or interface lock */
-void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
+void ieee80211_recalc_ps(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata, *found = NULL;
 	int count = 0;
@@ -1512,48 +1504,23 @@
 	}
 
 	if (count == 1 && ieee80211_powersave_allowed(found)) {
+		u8 dtimper = found->u.mgd.dtim_period;
 		s32 beaconint_us;
 
-		if (latency < 0)
-			latency = pm_qos_request(PM_QOS_NETWORK_LATENCY);
-
 		beaconint_us = ieee80211_tu_to_usec(
 					found->vif.bss_conf.beacon_int);
 
 		timeout = local->dynamic_ps_forced_timeout;
-		if (timeout < 0) {
-			/*
-			 * Go to full PSM if the user configures a very low
-			 * latency requirement.
-			 * The 2000 second value is there for compatibility
-			 * until the PM_QOS_NETWORK_LATENCY is configured
-			 * with real values.
-			 */
-			if (latency > (1900 * USEC_PER_MSEC) &&
-			    latency != (2000 * USEC_PER_SEC))
-				timeout = 0;
-			else
-				timeout = 100;
-		}
+		if (timeout < 0)
+			timeout = 100;
 		local->hw.conf.dynamic_ps_timeout = timeout;
 
-		if (beaconint_us > latency) {
-			local->ps_sdata = NULL;
-		} else {
-			int maxslp = 1;
-			u8 dtimper = found->u.mgd.dtim_period;
+		/* If the TIM IE is invalid, pretend the value is 1 */
+		if (!dtimper)
+			dtimper = 1;
 
-			/* If the TIM IE is invalid, pretend the value is 1 */
-			if (!dtimper)
-				dtimper = 1;
-			else if (dtimper > 1)
-				maxslp = min_t(int, dtimper,
-						    latency / beaconint_us);
-
-			local->hw.conf.max_sleep_period = maxslp;
-			local->hw.conf.ps_dtim_period = dtimper;
-			local->ps_sdata = found;
-		}
+		local->hw.conf.ps_dtim_period = dtimper;
+		local->ps_sdata = found;
 	} else {
 		local->ps_sdata = NULL;
 	}
@@ -1642,7 +1609,7 @@
 				  msecs_to_jiffies(
 				  local->hw.conf.dynamic_ps_timeout));
 		} else {
-			ieee80211_send_nullfunc(local, sdata, 1);
+			ieee80211_send_nullfunc(local, sdata, true);
 			/* Flush to get the tx status of nullfunc frame */
 			ieee80211_flush_queues(local, sdata, false);
 		}
@@ -2004,7 +1971,7 @@
 	ieee80211_bss_info_change_notify(sdata, bss_info_changed);
 
 	mutex_lock(&local->iflist_mtx);
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 	mutex_unlock(&local->iflist_mtx);
 
 	ieee80211_recalc_smps(sdata);
@@ -2172,7 +2139,7 @@
 	__ieee80211_stop_poll(sdata);
 
 	mutex_lock(&local->iflist_mtx);
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 	mutex_unlock(&local->iflist_mtx);
 
 	if (ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
@@ -2275,7 +2242,7 @@
 
 	if (ieee80211_hw_check(&sdata->local->hw, REPORTS_TX_ACK_STATUS)) {
 		ifmgd->nullfunc_failed = false;
-		ieee80211_send_nullfunc(sdata->local, sdata, 0);
+		ieee80211_send_nullfunc(sdata->local, sdata, false);
 	} else {
 		int ssid_len;
 
@@ -2348,7 +2315,7 @@
 		goto out;
 
 	mutex_lock(&sdata->local->iflist_mtx);
-	ieee80211_recalc_ps(sdata->local, -1);
+	ieee80211_recalc_ps(sdata->local);
 	mutex_unlock(&sdata->local->iflist_mtx);
 
 	ifmgd->probe_send_count = 0;
@@ -2453,15 +2420,9 @@
 		container_of(work, struct ieee80211_sub_if_data,
 			     u.mgd.beacon_connection_loss_work);
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct sta_info *sta;
 
-	if (ifmgd->associated) {
-		rcu_read_lock();
-		sta = sta_info_get(sdata, ifmgd->bssid);
-		if (sta)
-			sta->beacon_loss_count++;
-		rcu_read_unlock();
-	}
+	if (ifmgd->associated)
+		ifmgd->beacon_loss_count++;
 
 	if (ifmgd->connection_loss) {
 		sdata_info(sdata, "Connection to AP %pM lost\n",
@@ -3051,8 +3012,12 @@
 
 	rate_control_rate_init(sta);
 
-	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
+	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
 		set_sta_flag(sta, WLAN_STA_MFP);
+		sta->sta.mfp = true;
+	} else {
+		sta->sta.mfp = false;
+	}
 
 	sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
@@ -3262,16 +3227,6 @@
 	if (ifmgd->associated &&
 	    ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid))
 		ieee80211_reset_ap_probe(sdata);
-
-	if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
-	    ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
-		/* got probe response, continue with auth */
-		sdata_info(sdata, "direct probe responded\n");
-		ifmgd->auth_data->tries = 0;
-		ifmgd->auth_data->timeout = jiffies;
-		ifmgd->auth_data->timeout_started = true;
-		run_again(sdata, ifmgd->auth_data->timeout);
-	}
 }
 
 /*
@@ -3374,24 +3329,21 @@
 	bssid = ifmgd->associated->bssid;
 
 	/* Track average RSSI from the Beacon frames of the current AP */
-	ifmgd->last_beacon_signal = rx_status->signal;
 	if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
 		ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
-		ifmgd->ave_beacon_signal = rx_status->signal * 16;
+		ewma_beacon_signal_init(&ifmgd->ave_beacon_signal);
 		ifmgd->last_cqm_event_signal = 0;
 		ifmgd->count_beacon_signal = 1;
 		ifmgd->last_ave_beacon_signal = 0;
 	} else {
-		ifmgd->ave_beacon_signal =
-			(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
-			 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
-			 ifmgd->ave_beacon_signal) / 16;
 		ifmgd->count_beacon_signal++;
 	}
 
+	ewma_beacon_signal_add(&ifmgd->ave_beacon_signal, -rx_status->signal);
+
 	if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
-		int sig = ifmgd->ave_beacon_signal;
+		int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 		int last_sig = ifmgd->last_ave_beacon_signal;
 		struct ieee80211_event event = {
 			.type = RSSI_EVENT,
@@ -3418,10 +3370,11 @@
 	if (bss_conf->cqm_rssi_thold &&
 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
 	    !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI)) {
-		int sig = ifmgd->ave_beacon_signal / 16;
+		int sig = -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 		int last_event = ifmgd->last_cqm_event_signal;
 		int thold = bss_conf->cqm_rssi_thold;
 		int hyst = bss_conf->cqm_rssi_hyst;
+
 		if (sig < thold &&
 		    (last_event == 0 || sig < last_event - hyst)) {
 			ifmgd->last_cqm_event_signal = sig;
@@ -3456,31 +3409,27 @@
 					  len - baselen, false, &elems,
 					  care_about_ies, ncrc);
 
-	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) {
-		bool directed_tim = ieee80211_check_tim(elems.tim,
-							elems.tim_len,
-							ifmgd->aid);
-		if (directed_tim) {
-			if (local->hw.conf.dynamic_ps_timeout > 0) {
-				if (local->hw.conf.flags & IEEE80211_CONF_PS) {
-					local->hw.conf.flags &= ~IEEE80211_CONF_PS;
-					ieee80211_hw_config(local,
-							    IEEE80211_CONF_CHANGE_PS);
-				}
-				ieee80211_send_nullfunc(local, sdata, 0);
-			} else if (!local->pspolling && sdata->u.mgd.powersave) {
-				local->pspolling = true;
-
-				/*
-				 * Here is assumed that the driver will be
-				 * able to send ps-poll frame and receive a
-				 * response even though power save mode is
-				 * enabled, but some drivers might require
-				 * to disable power save here. This needs
-				 * to be investigated.
-				 */
-				ieee80211_send_pspoll(local, sdata);
+	if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) &&
+	    ieee80211_check_tim(elems.tim, elems.tim_len, ifmgd->aid)) {
+		if (local->hw.conf.dynamic_ps_timeout > 0) {
+			if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+				local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+				ieee80211_hw_config(local,
+						    IEEE80211_CONF_CHANGE_PS);
 			}
+			ieee80211_send_nullfunc(local, sdata, false);
+		} else if (!local->pspolling && sdata->u.mgd.powersave) {
+			local->pspolling = true;
+
+			/*
+			 * It is assumed here that the driver will be
+			 * able to send a ps-poll frame and receive a
+			 * response even though power save mode is
+			 * enabled, but some drivers might need
+			 * to disable power save here. This needs
+			 * to be investigated.
+			 */
+			ieee80211_send_pspoll(local, sdata);
 		}
 	}
 
@@ -3567,7 +3516,7 @@
 		ifmgd->have_beacon = true;
 
 		mutex_lock(&local->iflist_mtx);
-		ieee80211_recalc_ps(local, -1);
+		ieee80211_recalc_ps(local);
 		mutex_unlock(&local->iflist_mtx);
 
 		ieee80211_recalc_ps_vif(sdata);
@@ -3717,12 +3666,14 @@
 				    reason);
 }
 
-static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data;
 	u32 tx_flags = 0;
+	u16 trans = 1;
+	u16 status = 0;
 
 	sdata_assert_lock(sdata);
 
@@ -3746,55 +3697,28 @@
 
 	drv_mgd_prepare_tx(local, sdata);
 
-	if (auth_data->bss->proberesp_ies) {
-		u16 trans = 1;
-		u16 status = 0;
+	sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
+		   auth_data->bss->bssid, auth_data->tries,
+		   IEEE80211_AUTH_MAX_TRIES);
 
-		sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
-			   auth_data->bss->bssid, auth_data->tries,
-			   IEEE80211_AUTH_MAX_TRIES);
+	auth_data->expected_transaction = 2;
 
-		auth_data->expected_transaction = 2;
-
-		if (auth_data->algorithm == WLAN_AUTH_SAE) {
-			trans = auth_data->sae_trans;
-			status = auth_data->sae_status;
-			auth_data->expected_transaction = trans;
-		}
-
-		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
-			tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
-				   IEEE80211_TX_INTFL_MLME_CONN_TX;
-
-		ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
-				    auth_data->data, auth_data->data_len,
-				    auth_data->bss->bssid,
-				    auth_data->bss->bssid, NULL, 0, 0,
-				    tx_flags);
-	} else {
-		const u8 *ssidie;
-
-		sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
-			   auth_data->bss->bssid, auth_data->tries,
-			   IEEE80211_AUTH_MAX_TRIES);
-
-		rcu_read_lock();
-		ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
-		if (!ssidie) {
-			rcu_read_unlock();
-			return -EINVAL;
-		}
-		/*
-		 * Direct probe is sent to broadcast address as some APs
-		 * will not answer to direct packet in unassociated state.
-		 */
-		ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
-					 ssidie + 2, ssidie[1],
-					 NULL, 0, (u32) -1, true, 0,
-					 auth_data->bss->channel, false);
-		rcu_read_unlock();
+	if (auth_data->algorithm == WLAN_AUTH_SAE) {
+		trans = auth_data->sae_trans;
+		status = auth_data->sae_status;
+		auth_data->expected_transaction = trans;
 	}
 
+	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
+		tx_flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+			   IEEE80211_TX_INTFL_MLME_CONN_TX;
+
+	ieee80211_send_auth(sdata, trans, auth_data->algorithm, status,
+			    auth_data->data, auth_data->data_len,
+			    auth_data->bss->bssid,
+			    auth_data->bss->bssid, NULL, 0, 0,
+			    tx_flags);
+
 	if (tx_flags == 0) {
 		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
 		auth_data->timeout_started = true;
@@ -3874,8 +3798,7 @@
 		bool status_acked = ifmgd->status_acked;
 
 		ifmgd->status_received = false;
-		if (ifmgd->auth_data &&
-		    (ieee80211_is_probe_req(fc) || ieee80211_is_auth(fc))) {
+		if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
 			if (status_acked) {
 				ifmgd->auth_data->timeout =
 					jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
@@ -3906,7 +3829,7 @@
 			 * so let's just kill the auth data
 			 */
 			ieee80211_destroy_auth_data(sdata, false);
-		} else if (ieee80211_probe_auth(sdata)) {
+		} else if (ieee80211_auth(sdata)) {
 			u8 bssid[ETH_ALEN];
 			struct ieee80211_event event = {
 				.type = MLME_EVENT,
@@ -4197,21 +4120,6 @@
 	rcu_read_unlock();
 }
 
-int ieee80211_max_network_latency(struct notifier_block *nb,
-				  unsigned long data, void *dummy)
-{
-	s32 latency_usec = (s32) data;
-	struct ieee80211_local *local =
-		container_of(nb, struct ieee80211_local,
-			     network_latency_notifier);
-
-	mutex_lock(&local->iflist_mtx);
-	ieee80211_recalc_ps(local, latency_usec);
-	mutex_unlock(&local->iflist_mtx);
-
-	return NOTIFY_OK;
-}
-
 static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
 				     struct cfg80211_bss *cbss)
 {
@@ -4613,7 +4521,7 @@
 	if (err)
 		goto err_clear;
 
-	err = ieee80211_probe_auth(sdata);
+	err = ieee80211_auth(sdata);
 	if (err) {
 		sta_info_destroy_addr(sdata, req->bss->bssid);
 		goto err_clear;
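
The mlme.c side of the EWMA conversion retires the hand-rolled average,
which kept the value in x16 fixed point and weighted each new beacon
IEEE80211_SIGNAL_AVE_WEIGHT/16 = 3/16. With DECLARE_EWMA(beacon_signal, 16, 4)
the new-sample weight becomes 1/4, assuming the average.h semantics of this
kernel. One update step of each, side by side:

	/* old: avg16 holds 16 * average, new sample weighted 3/16 */
	avg16 = (3 * signal * 16 + (16 - 3) * avg16) / 16;

	/* new, as computed inside ewma_beacon_signal_add():
	 * avg16 = (3 * avg16 + 16 * signal) / 4, i.e. weight 1/4 */
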
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 573b81a..0be0aad 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -75,7 +75,7 @@
 	if (!sta)
 		return;
 
-	sta->last_rx = jiffies;
+	sta->rx_stats.last_rx = jiffies;
 
 	/* Add only mandatory rates for now */
 	sband = local->hw.wiphy->bands[band];
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f2c75cf..0440103 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -57,7 +57,7 @@
 		 * to send a new nullfunc frame to inform the AP that we
 		 * are again sleeping.
 		 */
-		ieee80211_send_nullfunc(local, sdata, 1);
+		ieee80211_send_nullfunc(local, sdata, true);
 }
 
 /* inform AP that we are awake again, unless power save is enabled */
@@ -66,7 +66,7 @@
 	struct ieee80211_local *local = sdata->local;
 
 	if (!local->ps_sdata)
-		ieee80211_send_nullfunc(local, sdata, 0);
+		ieee80211_send_nullfunc(local, sdata, false);
 	else if (local->offchannel_ps_enabled) {
 		/*
 		 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
@@ -93,7 +93,7 @@
 		 * restart the timer now and send a nullfunc frame to inform
 		 * the AP that we are awake.
 		 */
-		ieee80211_send_nullfunc(local, sdata, 0);
+		ieee80211_send_nullfunc(local, sdata, false);
 		mod_timer(&local->dynamic_ps_timer, jiffies +
 			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 	}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index b676b9f..ad88ad4 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -23,7 +23,8 @@
 
 	ieee80211_del_virtual_monitor(local);
 
-	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) {
+	if (ieee80211_hw_check(hw, AMPDU_AGGREGATION) &&
+	    !(wowlan && wowlan->any)) {
 		mutex_lock(&local->sta_mtx);
 		list_for_each_entry(sta, &local->sta_list, list) {
 			set_sta_flag(sta, WLAN_STA_BLOCK_BA);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 9ce8883..b07e2f7 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -305,7 +305,10 @@
 		info->control.rates[0].idx = i;
 		break;
 	}
-	WARN_ON_ONCE(i == sband->n_bitrates);
+	WARN_ONCE(i == sband->n_bitrates,
+		  "no supported rates (0x%x) in rate_mask 0x%x with flags 0x%x\n",
+		  sta ? sta->supp_rates[sband->band] : 0,
+		  rate_mask, rate_flags);
 
 	info->control.rates[0].count =
 		(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 1db5f7c..820b0ab 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -85,12 +85,10 @@
 	file->private_data = ms;
 	p = ms->buf;
 	p += sprintf(p, "\n");
-	p += sprintf(p, "best   __________rate_________    ______"
-			"statistics______    ________last_______    "
-			"______sum-of________\n");
-	p += sprintf(p, "rate  [name idx airtime max_tp]  [ ø(tp) ø(prob) "
-			"sd(prob)]  [prob.|retry|suc|att]  "
-			"[#success | #attempts]\n");
+	p += sprintf(p,
+		     "best   __________rate_________    ________statistics________    ________last_______    ______sum-of________\n");
+	p += sprintf(p,
+		     "rate  [name idx airtime max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
 
 	for (i = 0; i < mi->n_rates; i++) {
 		struct minstrel_rate *mr = &mi->r[i];
@@ -112,7 +110,7 @@
 		prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
 		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
 
-		p += sprintf(p, "%4u.%1u   %4u.%1u   %3u.%1u    %3u.%1u"
+		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
 				"     %3u.%1u %3u   %3u %-3u   "
 				"%9llu   %-9llu\n",
 				tp_max / 10, tp_max % 10,
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 6822ce0..5320e35 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -86,7 +86,7 @@
 		prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
 		eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
 
-		p += sprintf(p, "%4u.%1u   %4u.%1u   %3u.%1u    %3u.%1u"
+		p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
 				"     %3u.%1u %3u   %3u %-3u   "
 				"%9llu   %-9llu\n",
 				tp_max / 10, tp_max % 10,
@@ -129,12 +129,10 @@
 	p = ms->buf;
 
 	p += sprintf(p, "\n");
-	p += sprintf(p, "              best   ____________rate__________    "
-			"______statistics______    ________last_______    "
-			"______sum-of________\n");
-	p += sprintf(p, "mode guard #  rate  [name   idx airtime  max_tp]  "
-			"[ ø(tp) ø(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | "
-			"#attempts]\n");
+	p += sprintf(p,
+		     "              best   ____________rate__________    ________statistics________    ________last_______    ______sum-of________\n");
+	p += sprintf(p,
+		     "mode guard #  rate  [name   idx airtime  max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
 
 	p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
 	for (i = 0; i < MINSTREL_CCK_GROUP; i++)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5bc0b88..8bae5de 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1113,16 +1113,16 @@
 	    is_multicast_ether_addr(hdr->addr1))
 		return RX_CONTINUE;
 
-	if (rx->sta) {
-		if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
-			     rx->sta->last_seq_ctrl[rx->seqno_idx] ==
-			     hdr->seq_ctrl)) {
-			I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
-			rx->sta->num_duplicates++;
-			return RX_DROP_UNUSABLE;
-		} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
-			rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
-		}
+	if (!rx->sta)
+		return RX_CONTINUE;
+
+	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
+		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+		rx->sta->rx_stats.num_duplicates++;
+		return RX_DROP_UNUSABLE;
+	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
+		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
 	}
 
 	return RX_CONTINUE;
@@ -1396,51 +1396,56 @@
 						NL80211_IFTYPE_ADHOC);
 		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
 		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
-			sta->last_rx = jiffies;
+			sta->rx_stats.last_rx = jiffies;
 			if (ieee80211_is_data(hdr->frame_control) &&
 			    !is_multicast_ether_addr(hdr->addr1)) {
-				sta->last_rx_rate_idx = status->rate_idx;
-				sta->last_rx_rate_flag = status->flag;
-				sta->last_rx_rate_vht_flag = status->vht_flag;
-				sta->last_rx_rate_vht_nss = status->vht_nss;
+				sta->rx_stats.last_rate_idx =
+					status->rate_idx;
+				sta->rx_stats.last_rate_flag =
+					status->flag;
+				sta->rx_stats.last_rate_vht_flag =
+					status->vht_flag;
+				sta->rx_stats.last_rate_vht_nss =
+					status->vht_nss;
 			}
 		}
 	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
-		sta->last_rx = jiffies;
+		sta->rx_stats.last_rx = jiffies;
 	} else if (!is_multicast_ether_addr(hdr->addr1)) {
 		/*
 		 * Mesh beacons will update last_rx when if they are found to
 		 * match the current local configuration when processed.
 		 */
-		sta->last_rx = jiffies;
+		sta->rx_stats.last_rx = jiffies;
 		if (ieee80211_is_data(hdr->frame_control)) {
-			sta->last_rx_rate_idx = status->rate_idx;
-			sta->last_rx_rate_flag = status->flag;
-			sta->last_rx_rate_vht_flag = status->vht_flag;
-			sta->last_rx_rate_vht_nss = status->vht_nss;
+			sta->rx_stats.last_rate_idx = status->rate_idx;
+			sta->rx_stats.last_rate_flag = status->flag;
+			sta->rx_stats.last_rate_vht_flag = status->vht_flag;
+			sta->rx_stats.last_rate_vht_nss = status->vht_nss;
 		}
 	}
 
 	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
 		ieee80211_sta_rx_notify(rx->sdata, hdr);
 
-	sta->rx_fragments++;
-	sta->rx_bytes += rx->skb->len;
+	sta->rx_stats.fragments++;
+	sta->rx_stats.bytes += rx->skb->len;
 	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
-		sta->last_signal = status->signal;
-		ewma_signal_add(&sta->avg_signal, -status->signal);
+		sta->rx_stats.last_signal = status->signal;
+		ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
 	}
 
 	if (status->chains) {
-		sta->chains = status->chains;
+		sta->rx_stats.chains = status->chains;
 		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
 			int signal = status->chain_signal[i];
 
 			if (!(status->chains & BIT(i)))
 				continue;
 
-			sta->chain_signal_last[i] = signal;
-			ewma_signal_add(&sta->chain_signal_avg[i], -signal);
+			sta->rx_stats.chain_signal_last[i] = signal;
+			ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
+					-signal);
 		}
 	}
 
@@ -1500,7 +1505,7 @@
 		 * Update counter and free packet here to avoid
 		 * counting this as a dropped packed.
 		 */
-		sta->rx_packets++;
+		sta->rx_stats.packets++;
 		dev_kfree_skb(rx->skb);
 		return RX_QUEUED;
 	}
@@ -1922,7 +1927,7 @@
 	ieee80211_led_rx(rx->local);
  out_no_led:
 	if (rx->sta)
-		rx->sta->rx_packets++;
+		rx->sta->rx_stats.packets++;
 	return RX_CONTINUE;
 }
 
@@ -2376,7 +2381,7 @@
 		 * for non-QoS-data frames. Here we know it's a data
 		 * frame, so count MSDUs.
 		 */
-		rx->sta->rx_msdu[rx->seqno_idx]++;
+		rx->sta->rx_stats.msdu[rx->seqno_idx]++;
 	}
 
 	/*
@@ -2413,7 +2418,7 @@
 			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
 			schedule_work(&local->tdls_chsw_work);
 			if (rx->sta)
-				rx->sta->rx_packets++;
+				rx->sta->rx_stats.packets++;
 
 			return RX_QUEUED;
 		}
@@ -2875,7 +2880,7 @@
 
  handled:
 	if (rx->sta)
-		rx->sta->rx_packets++;
+		rx->sta->rx_stats.packets++;
 	dev_kfree_skb(rx->skb);
 	return RX_QUEUED;
 
@@ -2884,7 +2889,7 @@
 	skb_queue_tail(&sdata->skb_queue, rx->skb);
 	ieee80211_queue_work(&local->hw, &sdata->work);
 	if (rx->sta)
-		rx->sta->rx_packets++;
+		rx->sta->rx_stats.packets++;
 	return RX_QUEUED;
 }
 
@@ -2911,7 +2916,7 @@
 	if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
 			     rx->skb->data, rx->skb->len, 0)) {
 		if (rx->sta)
-			rx->sta->rx_packets++;
+			rx->sta->rx_stats.packets++;
 		dev_kfree_skb(rx->skb);
 		return RX_QUEUED;
 	}
@@ -3030,7 +3035,7 @@
 	skb_queue_tail(&sdata->skb_queue, rx->skb);
 	ieee80211_queue_work(&rx->local->hw, &sdata->work);
 	if (rx->sta)
-		rx->sta->rx_packets++;
+		rx->sta->rx_stats.packets++;
 
 	return RX_QUEUED;
 }
@@ -3112,7 +3117,7 @@
 	case RX_DROP_MONITOR:
 		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
 		if (rx->sta)
-			rx->sta->rx_dropped++;
+			rx->sta->rx_stats.dropped++;
 		/* fall through */
 	case RX_CONTINUE: {
 		struct ieee80211_rate *rate = NULL;
@@ -3132,7 +3137,7 @@
 	case RX_DROP_UNUSABLE:
 		I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
 		if (rx->sta)
-			rx->sta->rx_dropped++;
+			rx->sta->rx_stats.dropped++;
 		dev_kfree_skb(rx->skb);
 		break;
 	case RX_QUEUED:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 11d0901..b64fd2b 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -16,7 +16,6 @@
 #include <linux/if_arp.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
-#include <linux/pm_qos.h>
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -67,24 +66,23 @@
 	struct cfg80211_bss *cbss;
 	struct ieee80211_bss *bss;
 	int clen, srlen;
-	enum nl80211_bss_scan_width scan_width;
-	s32 signal = 0;
+	struct cfg80211_inform_bss bss_meta = {};
 	bool signal_valid;
 
 	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
-		signal = rx_status->signal * 100;
+		bss_meta.signal = rx_status->signal * 100;
 	else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC))
-		signal = (rx_status->signal * 100) / local->hw.max_signal;
+		bss_meta.signal = (rx_status->signal * 100) / local->hw.max_signal;
 
-	scan_width = NL80211_BSS_CHAN_WIDTH_20;
+	bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_20;
 	if (rx_status->flag & RX_FLAG_5MHZ)
-		scan_width = NL80211_BSS_CHAN_WIDTH_5;
+		bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_5;
 	if (rx_status->flag & RX_FLAG_10MHZ)
-		scan_width = NL80211_BSS_CHAN_WIDTH_10;
+		bss_meta.scan_width = NL80211_BSS_CHAN_WIDTH_10;
 
-	cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
-					       scan_width, mgmt, len, signal,
-					       GFP_ATOMIC);
+	bss_meta.chan = channel;
+	cbss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta,
+					      mgmt, len, GFP_ATOMIC);
 	if (!cbss)
 		return NULL;
 	/* In case the signal is invalid update the status */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 64f1936..f91d187 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -303,7 +303,6 @@
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_hw *hw = &local->hw;
 	struct sta_info *sta;
-	struct timespec uptime;
 	int i;
 
 	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
@@ -332,18 +331,17 @@
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
-	sta->last_rx = jiffies;
+	sta->rx_stats.last_rx = jiffies;
 
 	sta->sta_state = IEEE80211_STA_NONE;
 
 	/* Mark TID as unreserved */
 	sta->reserved_tid = IEEE80211_TID_UNRESERVED;
 
-	ktime_get_ts(&uptime);
-	sta->last_connected = uptime.tv_sec;
-	ewma_signal_init(&sta->avg_signal);
-	for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
-		ewma_signal_init(&sta->chain_signal_avg[i]);
+	sta->last_connected = ktime_get_seconds();
+	ewma_signal_init(&sta->rx_stats.avg_signal);
+	for (i = 0; i < ARRAY_SIZE(sta->rx_stats.chain_signal_avg); i++)
+		ewma_signal_init(&sta->rx_stats.chain_signal_avg[i]);
 
 	if (local->ops->wake_tx_queue) {
 		void *txq_data;
@@ -1068,7 +1066,7 @@
 		if (sdata != sta->sdata)
 			continue;
 
-		if (time_after(jiffies, sta->last_rx + exp_time)) {
+		if (time_after(jiffies, sta->rx_stats.last_rx + exp_time)) {
 			sta_dbg(sta->sdata, "expiring inactive STA %pM\n",
 				sta->sta.addr);
 
@@ -1808,12 +1806,50 @@
 			>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
 }
 
+static void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
+{
+	rinfo->flags = 0;
+
+	if (sta->rx_stats.last_rate_flag & RX_FLAG_HT) {
+		rinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = sta->rx_stats.last_rate_idx;
+	} else if (sta->rx_stats.last_rate_flag & RX_FLAG_VHT) {
+		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->nss = sta->rx_stats.last_rate_vht_nss;
+		rinfo->mcs = sta->rx_stats.last_rate_idx;
+	} else {
+		struct ieee80211_supported_band *sband;
+		int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+		u16 brate;
+
+		sband = sta->local->hw.wiphy->bands[
+				ieee80211_get_sdata_band(sta->sdata)];
+		brate = sband->bitrates[sta->rx_stats.last_rate_idx].bitrate;
+		rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
+	}
+
+	if (sta->rx_stats.last_rate_flag & RX_FLAG_SHORT_GI)
+		rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+	if (sta->rx_stats.last_rate_flag & RX_FLAG_5MHZ)
+		rinfo->bw = RATE_INFO_BW_5;
+	else if (sta->rx_stats.last_rate_flag & RX_FLAG_10MHZ)
+		rinfo->bw = RATE_INFO_BW_10;
+	else if (sta->rx_stats.last_rate_flag & RX_FLAG_40MHZ)
+		rinfo->bw = RATE_INFO_BW_40;
+	else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_80MHZ)
+		rinfo->bw = RATE_INFO_BW_80;
+	else if (sta->rx_stats.last_rate_vht_flag & RX_VHT_FLAG_160MHZ)
+		rinfo->bw = RATE_INFO_BW_160;
+	else
+		rinfo->bw = RATE_INFO_BW_20;
+}
+
 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
 	struct rate_control_ref *ref = NULL;
-	struct timespec uptime;
 	u32 thr = 0;
 	int i, ac;
 
@@ -1835,51 +1871,54 @@
 			 BIT(NL80211_STA_INFO_STA_FLAGS) |
 			 BIT(NL80211_STA_INFO_BSS_PARAM) |
 			 BIT(NL80211_STA_INFO_CONNECTED_TIME) |
-			 BIT(NL80211_STA_INFO_RX_DROP_MISC) |
-			 BIT(NL80211_STA_INFO_BEACON_LOSS);
+			 BIT(NL80211_STA_INFO_RX_DROP_MISC);
 
-	ktime_get_ts(&uptime);
-	sinfo->connected_time = uptime.tv_sec - sta->last_connected;
-	sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
+	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+		sinfo->beacon_loss_count = sdata->u.mgd.beacon_loss_count;
+		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_LOSS);
+	}
+
+	sinfo->connected_time = ktime_get_seconds() - sta->last_connected;
+	sinfo->inactive_time =
+		jiffies_to_msecs(jiffies - sta->rx_stats.last_rx);
 
 	if (!(sinfo->filled & (BIT(NL80211_STA_INFO_TX_BYTES64) |
 			       BIT(NL80211_STA_INFO_TX_BYTES)))) {
 		sinfo->tx_bytes = 0;
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			sinfo->tx_bytes += sta->tx_bytes[ac];
+			sinfo->tx_bytes += sta->tx_stats.bytes[ac];
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BYTES64);
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_PACKETS))) {
 		sinfo->tx_packets = 0;
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
-			sinfo->tx_packets += sta->tx_packets[ac];
+			sinfo->tx_packets += sta->tx_stats.packets[ac];
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
 	}
 
 	if (!(sinfo->filled & (BIT(NL80211_STA_INFO_RX_BYTES64) |
 			       BIT(NL80211_STA_INFO_RX_BYTES)))) {
-		sinfo->rx_bytes = sta->rx_bytes;
+		sinfo->rx_bytes = sta->rx_stats.bytes;
 		sinfo->filled |= BIT(NL80211_STA_INFO_RX_BYTES64);
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_RX_PACKETS))) {
-		sinfo->rx_packets = sta->rx_packets;
+		sinfo->rx_packets = sta->rx_stats.packets;
 		sinfo->filled |= BIT(NL80211_STA_INFO_RX_PACKETS);
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_RETRIES))) {
-		sinfo->tx_retries = sta->tx_retry_count;
+		sinfo->tx_retries = sta->status_stats.retry_count;
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_RETRIES);
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_FAILED))) {
-		sinfo->tx_failed = sta->tx_retry_failed;
+		sinfo->tx_failed = sta->status_stats.retry_failed;
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_FAILED);
 	}
 
-	sinfo->rx_dropped_misc = sta->rx_dropped;
-	sinfo->beacon_loss_count = sta->beacon_loss_count;
+	sinfo->rx_dropped_misc = sta->rx_stats.dropped;
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
 	    !(sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER)) {
@@ -1891,33 +1930,35 @@
 	if (ieee80211_hw_check(&sta->local->hw, SIGNAL_DBM) ||
 	    ieee80211_hw_check(&sta->local->hw, SIGNAL_UNSPEC)) {
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL))) {
-			sinfo->signal = (s8)sta->last_signal;
+			sinfo->signal = (s8)sta->rx_stats.last_signal;
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
 		}
 
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
 			sinfo->signal_avg =
-				(s8) -ewma_signal_read(&sta->avg_signal);
+				-ewma_signal_read(&sta->rx_stats.avg_signal);
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
 		}
 	}
 
-	if (sta->chains &&
+	if (sta->rx_stats.chains &&
 	    !(sinfo->filled & (BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
 			       BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG)))) {
 		sinfo->filled |= BIT(NL80211_STA_INFO_CHAIN_SIGNAL) |
 				 BIT(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
 
-		sinfo->chains = sta->chains;
+		sinfo->chains = sta->rx_stats.chains;
 		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
-			sinfo->chain_signal[i] = sta->chain_signal_last[i];
+			sinfo->chain_signal[i] =
+				sta->rx_stats.chain_signal_last[i];
 			sinfo->chain_signal_avg[i] =
-				(s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
+				-ewma_signal_read(&sta->rx_stats.chain_signal_avg[i]);
 		}
 	}
 
 	if (!(sinfo->filled & BIT(NL80211_STA_INFO_TX_BITRATE))) {
-		sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate);
+		sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate,
+				     &sinfo->txrate);
 		sinfo->filled |= BIT(NL80211_STA_INFO_TX_BITRATE);
 	}
 
@@ -1932,12 +1973,12 @@
 
 		if (!(tidstats->filled & BIT(NL80211_TID_STATS_RX_MSDU))) {
 			tidstats->filled |= BIT(NL80211_TID_STATS_RX_MSDU);
-			tidstats->rx_msdu = sta->rx_msdu[i];
+			tidstats->rx_msdu = sta->rx_stats.msdu[i];
 		}
 
 		if (!(tidstats->filled & BIT(NL80211_TID_STATS_TX_MSDU))) {
 			tidstats->filled |= BIT(NL80211_TID_STATS_TX_MSDU);
-			tidstats->tx_msdu = sta->tx_msdu[i];
+			tidstats->tx_msdu = sta->tx_stats.msdu[i];
 		}
 
 		if (!(tidstats->filled &
@@ -1945,7 +1986,8 @@
 		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			tidstats->filled |=
 				BIT(NL80211_TID_STATS_TX_MSDU_RETRIES);
-			tidstats->tx_msdu_retries = sta->tx_msdu_retries[i];
+			tidstats->tx_msdu_retries =
+				sta->status_stats.msdu_retries[i];
 		}
 
 		if (!(tidstats->filled &
@@ -1953,7 +1995,8 @@
 		    ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			tidstats->filled |=
 				BIT(NL80211_TID_STATS_TX_MSDU_FAILED);
-			tidstats->tx_msdu_failed = sta->tx_msdu_failed[i];
+			tidstats->tx_msdu_failed =
+				sta->status_stats.msdu_failed[i];
 		}
 	}
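
The legacy-rate branch of the new sta_set_rate_info_rx() applies the
channel-width shift: sband bitrate table entries are in units of 100 kb/s
for full-width channels, and ieee80211_vif_get_shift() returns 1 on 10 MHz
and 2 on 5 MHz channels, halving the rate per step. For example:

	u16 brate = 540;	/* 54.0 Mb/s table entry, units of 100 kb/s */
	int shift = 1;		/* 10 MHz channel */

	rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);  /* 270 -> 27.0 Mb/s */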
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b087c71..2cafb21 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -133,6 +133,7 @@
  * @buf_size: reorder buffer size at receiver
  * @failed_bar_ssn: ssn of the last failed BAR tx attempt
  * @bar_pending: BAR needs to be re-sent
+ * @amsdu: support A-MSDU within A-MPDU
  *
  * This structure's lifetime is managed by RCU, assignments to
  * the array holding it must hold the aggregation mutex.
@@ -158,6 +159,7 @@
 
 	u16 failed_bar_ssn;
 	bool bar_pending;
+	bool amsdu;
 };
 
 /**
@@ -342,12 +344,6 @@
  * @rate_ctrl_lock: spinlock used to protect rate control data
  *	(data inside the algorithm, so serializes calls there)
  * @rate_ctrl_priv: rate control private per-STA pointer
- * @last_tx_rate: rate used for last transmit, to report to userspace as
- *	"the" transmit rate
- * @last_rx_rate_idx: rx status rate index of the last data packet
- * @last_rx_rate_flag: rx status flag of the last data packet
- * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
- * @last_rx_rate_vht_nss: rx status nss of last data packet
  * @lock: used for locking all fields that require locking, see comments
  *	in the header file.
  * @drv_deliver_wk: used for delivering frames after driver PS unblocking
@@ -362,23 +358,9 @@
  *	the station when it leaves powersave or polls for frames
  * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
  * @txq_buffered_tids: bitmap of TIDs that mac80211 has txq data buffered on
- * @rx_packets: Number of MSDUs received from this STA
- * @rx_bytes: Number of bytes received from this STA
- * @last_rx: time (in jiffies) when last frame was received from this STA
  * @last_connected: time (in seconds) when a station got connected
- * @num_duplicates: number of duplicate frames received from this STA
- * @rx_fragments: number of received MPDUs
- * @rx_dropped: number of dropped MPDUs from this STA
- * @last_signal: signal of last received frame from this STA
- * @avg_signal: moving average of signal of received frames from this STA
- * @last_ack_signal: signal of last received Ack frame from this STA
  * @last_seq_ctrl: last received seq/frag number from this STA (per TID
  *	plus one for non-QoS frames)
- * @tx_filtered_count: number of frames the hardware filtered for this STA
- * @tx_retry_failed: number of frames that failed retry
- * @tx_retry_count: total number of retries for frames to this STA
- * @tx_packets: number of RX/TX MSDUs
- * @tx_bytes: number of bytes transmitted to this STA
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
@@ -386,32 +368,22 @@
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
- * @lost_packets: number of consecutive lost packets
  * @sta: station information we share with the driver
  * @sta_state: duplicates information about station state (for debug)
  * @beacon_loss_count: number of times beacon loss has triggered
  * @rcu_head: RCU head used for freeing this station struct
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *	taken from HT/VHT capabilities or VHT operating mode notification
- * @chains: chains ever used for RX from this station
- * @chain_signal_last: last signal (per chain)
- * @chain_signal_avg: signal average (per chain)
  * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
  *	AP only.
  * @cipher_scheme: optional cipher scheme for this station
- * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
  * @reserved_tid: reserved TID (if any, otherwise IEEE80211_TID_UNRESERVED)
- * @tx_msdu: MSDUs transmitted to this station, using IEEE80211_NUM_TID
- *	entry for non-QoS frames
- * @tx_msdu_retries: MSDU retries for transmissions to to this station,
- *	using IEEE80211_NUM_TID entry for non-QoS frames
- * @tx_msdu_failed: MSDU failures for transmissions to to this station,
- *	using IEEE80211_NUM_TID entry for non-QoS frames
- * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
- *	entry for non-QoS frames
  * @fast_tx: TX fastpath information
  * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible to
  *	the BSS one.
+ * @tx_stats: TX statistics
+ * @rx_stats: RX statistics
+ * @status_stats: TX status statistics
  */
 struct sta_info {
 	/* General information, mostly static */
@@ -455,42 +427,49 @@
 	unsigned long driver_buffered_tids;
 	unsigned long txq_buffered_tids;
 
-	/* Updated from RX path only, no locking requirements */
-	unsigned long rx_packets;
-	u64 rx_bytes;
-	unsigned long last_rx;
 	long last_connected;
-	unsigned long num_duplicates;
-	unsigned long rx_fragments;
-	unsigned long rx_dropped;
-	int last_signal;
-	struct ewma_signal avg_signal;
-	int last_ack_signal;
 
-	u8 chains;
-	s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-	struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+	/* Updated from RX path only, no locking requirements */
+	struct {
+		unsigned long packets;
+		u64 bytes;
+		unsigned long last_rx;
+		unsigned long num_duplicates;
+		unsigned long fragments;
+		unsigned long dropped;
+		int last_signal;
+		struct ewma_signal avg_signal;
+		u8 chains;
+		s8 chain_signal_last[IEEE80211_MAX_CHAINS];
+		struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
+		int last_rate_idx;
+		u32 last_rate_flag;
+		u32 last_rate_vht_flag;
+		u8 last_rate_vht_nss;
+		u64 msdu[IEEE80211_NUM_TIDS + 1];
+	} rx_stats;
 
 	/* Plus 1 for non-QoS frames */
 	__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
 
 	/* Updated from TX status path only, no locking requirements */
-	unsigned long tx_filtered_count;
-	unsigned long tx_retry_failed, tx_retry_count;
+	struct {
+		unsigned long filtered;
+		unsigned long retry_failed, retry_count;
+		unsigned int lost_packets;
+		unsigned long last_tdls_pkt_time;
+		u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
+		u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
+	} status_stats;
 
 	/* Updated from TX path only, no locking requirements */
-	u64 tx_packets[IEEE80211_NUM_ACS];
-	u64 tx_bytes[IEEE80211_NUM_ACS];
-	struct ieee80211_tx_rate last_tx_rate;
-	int last_rx_rate_idx;
-	u32 last_rx_rate_flag;
-	u32 last_rx_rate_vht_flag;
-	u8 last_rx_rate_vht_nss;
+	struct {
+		u64 packets[IEEE80211_NUM_ACS];
+		u64 bytes[IEEE80211_NUM_ACS];
+		struct ieee80211_tx_rate last_rate;
+		u64 msdu[IEEE80211_NUM_TIDS + 1];
+	} tx_stats;
 	u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
-	u64 tx_msdu[IEEE80211_NUM_TIDS + 1];
-	u64 tx_msdu_retries[IEEE80211_NUM_TIDS + 1];
-	u64 tx_msdu_failed[IEEE80211_NUM_TIDS + 1];
-	u64 rx_msdu[IEEE80211_NUM_TIDS + 1];
 
 	/*
 	 * Aggregation information, locked with lock.
@@ -507,15 +486,9 @@
 
 	enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
 
-	unsigned int lost_packets;
-	unsigned int beacon_loss_count;
-
 	enum ieee80211_smps_mode known_smps_mode;
 	const struct ieee80211_cipher_scheme *cipher_scheme;
 
-	/* TDLS timeout data */
-	unsigned long last_tdls_pkt_time;
-
 	u8 reserved_tid;
 
 	struct cfg80211_chan_def tdls_chandef;
@@ -686,8 +659,6 @@
 void sta_set_rate_info_tx(struct sta_info *sta,
 			  const struct ieee80211_tx_rate *rate,
 			  struct rate_info *rinfo);
-void sta_set_rate_info_rx(struct sta_info *sta,
-			  struct rate_info *rinfo);
 void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo);
 
 void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
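
The regrouping above follows the writer contexts named in the comments:
rx_stats is written only on the RX path, tx_stats only on the TX path and
status_stats only from TX-status processing, which is what the existing
"no locking requirements" rule depends on. Call sites change mechanically,
one sub-struct per context, schematically:

	sta->rx_stats.packets++;			/* RX path */
	sta->tx_stats.bytes[ac] += skb->len;		/* TX path */
	sta->status_stats.retry_count += retries;	/* TX status path */
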
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 8ba5832..5bad05e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -67,7 +67,7 @@
 		       IEEE80211_TX_INTFL_RETRANSMISSION;
 	info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
 
-	sta->tx_filtered_count++;
+	sta->status_stats.filtered++;
 
 	/*
 	 * Clear more-data bit on filtered frames, it might be set
@@ -101,6 +101,7 @@
 	 * when it wakes up for the next time.
 	 */
 	set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
+	ieee80211_clear_fast_xmit(sta);
 
 	/*
 	 * This code races in the following way:
@@ -182,7 +183,7 @@
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
 	if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
-		sta->last_rx = jiffies;
+		sta->rx_stats.last_rx = jiffies;
 
 	if (ieee80211_is_data_qos(mgmt->frame_control)) {
 		struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -556,8 +557,9 @@
 	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
 		return;
 
-	sta->lost_packets++;
-	if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD)
+	sta->status_stats.lost_packets++;
+	if (!sta->sta.tdls &&
+	    sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
 		return;
 
 	/*
@@ -567,14 +569,15 @@
 	 * mechanism.
 	 */
 	if (sta->sta.tdls &&
-	    (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
+	    (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
 	     time_before(jiffies,
-			 sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME)))
+			 sta->status_stats.last_tdls_pkt_time +
+			 STA_LOST_TDLS_PKT_TIME)))
 		return;
 
 	cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
-				    sta->lost_packets, GFP_ATOMIC);
-	sta->lost_packets = 0;
+				    sta->status_stats.lost_packets, GFP_ATOMIC);
+	sta->status_stats.lost_packets = 0;
 }
 
 static int ieee80211_tx_get_rates(struct ieee80211_hw *hw,
@@ -635,18 +638,18 @@
 		sta = container_of(pubsta, struct sta_info, sta);
 
 		if (!acked)
-			sta->tx_retry_failed++;
-		sta->tx_retry_count += retry_count;
+			sta->status_stats.retry_failed++;
+		sta->status_stats.retry_count += retry_count;
 
 		if (acked) {
-			sta->last_rx = jiffies;
+			sta->rx_stats.last_rx = jiffies;
 
-			if (sta->lost_packets)
-				sta->lost_packets = 0;
+			if (sta->status_stats.lost_packets)
+				sta->status_stats.lost_packets = 0;
 
 			/* Track when last TDLS packet was ACKed */
 			if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
-				sta->last_tdls_pkt_time = jiffies;
+				sta->status_stats.last_tdls_pkt_time = jiffies;
 		} else {
 			ieee80211_lost_packet(sta, info);
 		}
@@ -668,16 +671,70 @@
 }
 EXPORT_SYMBOL(ieee80211_tx_status_noskb);
 
-void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_supported_band *sband,
+			  int retry_count, int shift, bool send_to_cooked)
 {
 	struct sk_buff *skb2;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sub_if_data *sdata;
+	struct net_device *prev_dev = NULL;
+	int rtap_len;
+
+	/* send frame to monitor interfaces now */
+	rtap_len = ieee80211_tx_radiotap_len(info);
+	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
+		pr_err("ieee80211_tx_status: headroom too small\n");
+		dev_kfree_skb(skb);
+		return;
+	}
+	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+					 rtap_len, shift);
+
+	/* XXX: is this sufficient for BPF? */
+	skb_set_mac_header(skb, 0);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = htons(ETH_P_802_2);
+	memset(skb->cb, 0, sizeof(skb->cb));
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+			if (!ieee80211_sdata_running(sdata))
+				continue;
+
+			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
+			    !send_to_cooked)
+				continue;
+
+			if (prev_dev) {
+				skb2 = skb_clone(skb, GFP_ATOMIC);
+				if (skb2) {
+					skb2->dev = prev_dev;
+					netif_rx(skb2);
+				}
+			}
+
+			prev_dev = sdata->dev;
+		}
+	}
+	if (prev_dev) {
+		skb->dev = prev_dev;
+		netif_rx(skb);
+		skb = NULL;
+	}
+	rcu_read_unlock();
+	dev_kfree_skb(skb);
+}
+
+void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	__le16 fc;
 	struct ieee80211_supported_band *sband;
-	struct ieee80211_sub_if_data *sdata;
-	struct net_device *prev_dev = NULL;
 	struct sta_info *sta;
 	struct rhash_head *tmp;
 	int retry_count;
@@ -685,7 +742,6 @@
 	bool send_to_cooked;
 	bool acked;
 	struct ieee80211_bar *bar;
-	int rtap_len;
 	int shift = 0;
 	int tid = IEEE80211_NUM_TIDS;
 	const struct bucket_table *tbl;
@@ -730,7 +786,8 @@
 		if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) &&
 		    (ieee80211_is_data(hdr->frame_control)) &&
 		    (rates_idx != -1))
-			sta->last_tx_rate = info->status.rates[rates_idx];
+			sta->tx_stats.last_rate =
+				info->status.rates[rates_idx];
 
 		if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
 		    (ieee80211_is_data_qos(fc))) {
@@ -776,13 +833,15 @@
 			return;
 		} else {
 			if (!acked)
-				sta->tx_retry_failed++;
-			sta->tx_retry_count += retry_count;
+				sta->status_stats.retry_failed++;
+			sta->status_stats.retry_count += retry_count;
 
 			if (ieee80211_is_data_present(fc)) {
 				if (!acked)
-					sta->tx_msdu_failed[tid]++;
-				sta->tx_msdu_retries[tid] += retry_count;
+					sta->status_stats.msdu_failed[tid]++;
+
+				sta->status_stats.msdu_retries[tid] +=
+					retry_count;
 			}
 		}
 
@@ -800,19 +859,17 @@
 
 		if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
 			if (info->flags & IEEE80211_TX_STAT_ACK) {
-				if (sta->lost_packets)
-					sta->lost_packets = 0;
+				if (sta->status_stats.lost_packets)
+					sta->status_stats.lost_packets = 0;
 
 				/* Track when last TDLS packet was ACKed */
 				if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
-					sta->last_tdls_pkt_time = jiffies;
+					sta->status_stats.last_tdls_pkt_time =
+						jiffies;
 			} else {
 				ieee80211_lost_packet(sta, info);
 			}
 		}
-
-		if (acked)
-			sta->last_ack_signal = info->status.ack_signal;
 	}
 
 	rcu_read_unlock();
@@ -878,51 +935,8 @@
 		return;
 	}
 
-	/* send frame to monitor interfaces now */
-	rtap_len = ieee80211_tx_radiotap_len(info);
-	if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
-		pr_err("ieee80211_tx_status: headroom too small\n");
-		dev_kfree_skb(skb);
-		return;
-	}
-	ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
-					 rtap_len, shift);
-
-	/* XXX: is this sufficient for BPF? */
-	skb_set_mac_header(skb, 0);
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb->pkt_type = PACKET_OTHERHOST;
-	skb->protocol = htons(ETH_P_802_2);
-	memset(skb->cb, 0, sizeof(skb->cb));
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-		if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
-			if (!ieee80211_sdata_running(sdata))
-				continue;
-
-			if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
-			    !send_to_cooked)
-				continue;
-
-			if (prev_dev) {
-				skb2 = skb_clone(skb, GFP_ATOMIC);
-				if (skb2) {
-					skb2->dev = prev_dev;
-					netif_rx(skb2);
-				}
-			}
-
-			prev_dev = sdata->dev;
-		}
-	}
-	if (prev_dev) {
-		skb->dev = prev_dev;
-		netif_rx(skb);
-		skb = NULL;
-	}
-	rcu_read_unlock();
-	dev_kfree_skb(skb);
+	/* send to monitor interfaces */
+	ieee80211_tx_monitor(local, skb, sband, retry_count, shift,
+			     send_to_cooked);
 }
 EXPORT_SYMBOL(ieee80211_tx_status);
 
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 4e202d0..ecc5e2a 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -41,9 +41,11 @@
 					 struct sk_buff *skb)
 {
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	bool chan_switch = local->hw.wiphy->features &
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
-	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+			  !ifmgd->tdls_wider_bw_prohibited;
 	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
 	bool vht = sband && sband->vht_cap.vht_supported;
@@ -331,8 +333,8 @@
 
 	/* proceed to downgrade the chandef until usable or the same */
 	while (uc.width > max_width &&
-	       !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
-					&uc, sdata->wdev.iftype))
+	       !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+					      sdata->wdev.iftype))
 		ieee80211_chandef_downgrade(&uc);
 
 	if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 6f14591..5cf8f4e 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -325,7 +325,6 @@
 		__field(u32, flags)
 		__field(int, power_level)
 		__field(int, dynamic_ps_timeout)
-		__field(int, max_sleep_period)
 		__field(u16, listen_interval)
 		__field(u8, long_frame_max_tx_count)
 		__field(u8, short_frame_max_tx_count)
@@ -339,7 +338,6 @@
 		__entry->flags = local->hw.conf.flags;
 		__entry->power_level = local->hw.conf.power_level;
 		__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
-		__entry->max_sleep_period = local->hw.conf.max_sleep_period;
 		__entry->listen_interval = local->hw.conf.listen_interval;
 		__entry->long_frame_max_tx_count =
 			local->hw.conf.long_frame_max_tx_count;
@@ -497,6 +495,36 @@
 	)
 );
 
+TRACE_EVENT(drv_config_iface_filter,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 unsigned int filter_flags,
+		 unsigned int changed_flags),
+
+	TP_ARGS(local, sdata, filter_flags, changed_flags),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(unsigned int, filter_flags)
+		__field(unsigned int, changed_flags)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->filter_flags = filter_flags;
+		__entry->changed_flags = changed_flags;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT
+		" filter_flags: %#x changed_flags: %#x",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->filter_flags,
+		__entry->changed_flags
+	)
+);
+
 TRACE_EVENT(drv_set_tim,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sta *sta, bool set),
@@ -944,9 +972,9 @@
 		 struct ieee80211_sub_if_data *sdata,
 		 enum ieee80211_ampdu_mlme_action action,
 		 struct ieee80211_sta *sta, u16 tid,
-		 u16 *ssn, u8 buf_size),
+		 u16 *ssn, u8 buf_size, bool amsdu),
 
-	TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
+	TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -955,6 +983,7 @@
 		__field(u16, tid)
 		__field(u16, ssn)
 		__field(u8, buf_size)
+		__field(bool, amsdu)
 		VIF_ENTRY
 	),
 
@@ -966,12 +995,13 @@
 		__entry->tid = tid;
 		__entry->ssn = ssn ? *ssn : 0;
 		__entry->buf_size = buf_size;
+		__entry->amsdu = amsdu;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
 		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
-		__entry->tid, __entry->buf_size
+		__entry->tid, __entry->buf_size, __entry->amsdu
 	)
 );
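
TRACE_EVENT(drv_config_iface_filter) above generates a
trace_drv_config_iface_filter() helper; the matching driver-ops wrapper and
config_iface_filter callback are added elsewhere in this series, so the sketch
below is an assumption about their shape rather than a quote. The wrapper
would emit the event around the driver callback roughly like this:

static inline void drv_config_iface_filter(struct ieee80211_local *local,
					   struct ieee80211_sub_if_data *sdata,
					   unsigned int filter_flags,
					   unsigned int changed_flags)
{
	trace_drv_config_iface_filter(local, sdata, filter_flags,
				      changed_flags);
	/* the op is optional; drivers without per-vif filtering skip it */
	if (local->ops->config_iface_filter)
		local->ops->config_iface_filter(&local->hw, &sdata->vif,
						filter_flags, changed_flags);
	trace_drv_return_void(local);
}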
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 84e0e8c..bdc224d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -757,9 +757,9 @@
 	if (txrc.reported_rate.idx < 0) {
 		txrc.reported_rate = tx->rate;
 		if (tx->sta && ieee80211_is_data(hdr->frame_control))
-			tx->sta->last_tx_rate = txrc.reported_rate;
+			tx->sta->tx_stats.last_rate = txrc.reported_rate;
 	} else if (tx->sta)
-		tx->sta->last_tx_rate = txrc.reported_rate;
+		tx->sta->tx_stats.last_rate = txrc.reported_rate;
 
 	if (ratetbl)
 		return TX_CONTINUE;
@@ -824,7 +824,7 @@
 		hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
 		tx->sdata->sequence_number += 0x10;
 		if (tx->sta)
-			tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++;
+			tx->sta->tx_stats.msdu[IEEE80211_NUM_TIDS]++;
 		return TX_CONTINUE;
 	}
 
@@ -840,7 +840,7 @@
 
 	qc = ieee80211_get_qos_ctl(hdr);
 	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
-	tx->sta->tx_msdu[tid]++;
+	tx->sta->tx_stats.msdu[tid]++;
 
 	if (!tx->sta->sta.txq[0])
 		hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
@@ -994,10 +994,10 @@
 
 	skb_queue_walk(&tx->skbs, skb) {
 		ac = skb_get_queue_mapping(skb);
-		tx->sta->tx_bytes[ac] += skb->len;
+		tx->sta->tx_stats.bytes[ac] += skb->len;
 	}
 	if (ac >= 0)
-		tx->sta->tx_packets[ac]++;
+		tx->sta->tx_stats.packets[ac]++;
 
 	return TX_CONTINUE;
 }
@@ -1218,8 +1218,10 @@
 
 	if (!tx->sta)
 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+	else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
 		info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+		ieee80211_check_fast_xmit(tx->sta);
+	}
 
 	info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
 
@@ -2451,7 +2453,8 @@
 
 	if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
 	    test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
-	    test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+	    test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
+	    test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
 		goto out;
 
 	if (sdata->noack_map)
@@ -2767,7 +2770,8 @@
 
 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
 		*ieee80211_get_qos_ctl(hdr) = tid;
-		hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+		if (!sta->sta.txq[0])
+			hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 	} else {
 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
 		hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
@@ -2775,10 +2779,10 @@
 	}
 
 	if (skb_shinfo(skb)->gso_size)
-		sta->tx_msdu[tid] +=
+		sta->tx_stats.msdu[tid] +=
 			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
 	else
-		sta->tx_msdu[tid]++;
+		sta->tx_stats.msdu[tid]++;
 
 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
@@ -2809,8 +2813,8 @@
 	/* statistics normally done by ieee80211_tx_h_stats (but that
 	 * has to consider fragmentation, so is more complex)
 	 */
-	sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
-	sta->tx_packets[skb_get_queue_mapping(skb)]++;
+	sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+	sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
 
 	if (fast_tx->pn_offs) {
 		u64 pn;
@@ -3512,6 +3516,12 @@
 {
 	struct ieee80211_mutable_offsets offs = {};
 	struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
+	struct sk_buff *copy;
+	struct ieee80211_supported_band *sband;
+	int shift;
+
+	if (!bcn)
+		return bcn;
 
 	if (tim_offset)
 		*tim_offset = offs.tim_offset;
@@ -3519,6 +3529,19 @@
 	if (tim_length)
 		*tim_length = offs.tim_length;
 
+	if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
+	    !hw_to_local(hw)->monitors)
+		return bcn;
+
+	/* send a copy to monitor interfaces */
+	copy = skb_copy(bcn, GFP_ATOMIC);
+	if (!copy)
+		return bcn;
+
+	shift = ieee80211_vif_get_shift(vif);
+	sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))];
+	ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false);
+
 	return bcn;
 }
 EXPORT_SYMBOL(ieee80211_beacon_get_tim);
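
In the fast-xmit accounting above, a GSO skb is counted as the number of MSDUs
it will be segmented into. DIV_ROUND_UP is the standard kernel.h helper:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

so, for example, a 4000 byte GSO skb with gso_size 1500 accounts for
DIV_ROUND_UP(4000, 1500) == 3 MSDUs rather than the 4000 / 1500 == 2 that
plain integer division would give.
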
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 1104421..8274c86 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1951,7 +1951,7 @@
 		}
 	}
 
-	ieee80211_recalc_ps(local, -1);
+	ieee80211_recalc_ps(local);
 
 	/*
 	 * The sta might be in psm against the ap (e.g. because
@@ -1966,7 +1966,7 @@
 			if (!sdata->u.mgd.associated)
 				continue;
 
-			ieee80211_send_nullfunc(local, sdata, 0);
+			ieee80211_send_nullfunc(local, sdata, false);
 		}
 	}
 
@@ -2017,8 +2017,9 @@
 		mutex_lock(&local->sta_mtx);
 
 		list_for_each_entry(sta, &local->sta_list, list) {
-			ieee80211_sta_tear_down_BA_sessions(
-					sta, AGG_STOP_LOCAL_REQUEST);
+			if (!local->resuming)
+				ieee80211_sta_tear_down_BA_sessions(
+						sta, AGG_STOP_LOCAL_REQUEST);
 			clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 		}
 
@@ -2041,9 +2042,13 @@
 	if (sched_scan_sdata && sched_scan_req)
 		/*
 		 * Sched scan stopped, but we don't want to report it. Instead,
-		 * we're trying to reschedule.
+		 * we're trying to reschedule. However, if more than one scan
+		 * plan was set, we cannot reschedule since we don't know which
+		 * scan plan was currently running (and some scan plans may have
+		 * already finished).
 		 */
-		if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+		if (sched_scan_req->n_scan_plans > 1 ||
+		    __ieee80211_request_sched_scan_start(sched_scan_sdata,
 							 sched_scan_req))
 			sched_scan_stopped = true;
 	mutex_unlock(&local->mtx);
@@ -2324,6 +2329,8 @@
 	if (chandef->center_freq2)
 		vht_oper->center_freq_seg2_idx =
 			ieee80211_frequency_to_channel(chandef->center_freq2);
+	else
+		vht_oper->center_freq_seg2_idx = 0x00;
 
 	switch (chandef->width) {
 	case NL80211_CHAN_WIDTH_160:
@@ -2541,7 +2548,7 @@
 		/* non-managed type interfaces */
 		return 0;
 	}
-	return ifmgd->ave_beacon_signal / 16;
+	return -ewma_beacon_signal_read(&ifmgd->ave_beacon_signal);
 }
 EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
 
@@ -3298,9 +3305,11 @@
 	if (sta) {
 		txqi->txq.sta = &sta->sta;
 		sta->sta.txq[tid] = &txqi->txq;
+		txqi->txq.tid = tid;
 		txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
 	} else {
 		sdata->vif.txq = &txqi->txq;
+		txqi->txq.tid = 0;
 		txqi->txq.ac = IEEE80211_AC_BE;
 	}
 }
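
The ieee80211_ave_rssi() change above reads the beacon signal average through
the kernel's EWMA helpers instead of the old hand-rolled "/ 16" accumulator.
An exponentially weighted moving average with weight w folds each new sample
in at 1/w. A minimal fixed-point sketch of the idea (illustrative only; PREC
and WEIGHT here are assumptions, not the parameters DECLARE_EWMA() is given
for ave_beacon_signal):

#define PREC	10			/* internal precision bits */
#define WEIGHT	8			/* decay factor, power of two */

struct ewma_sketch {
	unsigned long avg;		/* fixed point, << PREC */
};

static void ewma_sketch_add(struct ewma_sketch *e, unsigned long sample)
{
	unsigned long s = sample << PREC;

	/* the first sample initializes the average directly */
	e->avg = e->avg ? (e->avg * (WEIGHT - 1) + s) / WEIGHT : s;
}

static unsigned long ewma_sketch_read(const struct ewma_sketch *e)
{
	return e->avg >> PREC;
}
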
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index feb547d..d824c38 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -174,9 +174,12 @@
 	 * a driver that supports HW encryption. Send up the key idx only if
 	 * the key is set.
 	 */
-	mac80211_ev_michael_mic_failure(rx->sdata,
-					rx->key ? rx->key->conf.keyidx : -1,
-					(void *) skb->data, NULL, GFP_ATOMIC);
+	cfg80211_michael_mic_failure(rx->sdata->dev, hdr->addr2,
+				     is_multicast_ether_addr(hdr->addr1) ?
+				     NL80211_KEYTYPE_GROUP :
+				     NL80211_KEYTYPE_PAIRWISE,
+				     rx->key ? rx->key->conf.keyidx : -1,
+				     NULL, GFP_ATOMIC);
 	return RX_DROP_UNUSABLE;
 }
 
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index c865ebb..57b5e94 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -266,6 +266,195 @@
 	return 0;
 }
 
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+static void
+ieee802154_get_llsec_table(struct wpan_phy *wpan_phy,
+			   struct wpan_dev *wpan_dev,
+			   struct ieee802154_llsec_table **table)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	*table = &sdata->sec.table;
+}
+
+static void
+ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	mutex_lock(&sdata->sec_mtx);
+}
+
+static void
+ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+
+	mutex_unlock(&sdata->sec_mtx);
+}
+
+static int
+ieee802154_set_llsec_params(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev,
+			    const struct ieee802154_llsec_params *params,
+			    int changed)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_set_params(&sdata->sec, params, changed);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_get_llsec_params(struct wpan_phy *wpan_phy,
+			    struct wpan_dev *wpan_dev,
+			    struct ieee802154_llsec_params *params)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_get_params(&sdata->sec, params);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			 const struct ieee802154_llsec_key_id *id,
+			 const struct ieee802154_llsec_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_key_add(&sdata->sec, id, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			 const struct ieee802154_llsec_key_id *id)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_key_del(&sdata->sec, id);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			const struct ieee802154_llsec_seclevel *sl)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_seclevel_add(&sdata->sec, sl);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+			const struct ieee802154_llsec_seclevel *sl)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_seclevel_del(&sdata->sec, sl);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      const struct ieee802154_llsec_device *dev_desc)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_dev_add(&sdata->sec, dev_desc);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_dev_del(&sdata->sec, extended_addr);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr,
+		      const struct ieee802154_llsec_device_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+
+static int
+ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		      __le64 extended_addr,
+		      const struct ieee802154_llsec_device_key *key)
+{
+	struct net_device *dev = wpan_dev->netdev;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	int res;
+
+	mutex_lock(&sdata->sec_mtx);
+	res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key);
+	mutex_unlock(&sdata->sec_mtx);
+
+	return res;
+}
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
+
 const struct cfg802154_ops mac802154_config_ops = {
 	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
 	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
@@ -284,4 +473,20 @@
 	.set_max_frame_retries = ieee802154_set_max_frame_retries,
 	.set_lbt_mode = ieee802154_set_lbt_mode,
 	.set_ackreq_default = ieee802154_set_ackreq_default,
+#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+	.get_llsec_table = ieee802154_get_llsec_table,
+	.lock_llsec_table = ieee802154_lock_llsec_table,
+	.unlock_llsec_table = ieee802154_unlock_llsec_table,
+	/* TODO above */
+	.set_llsec_params = ieee802154_set_llsec_params,
+	.get_llsec_params = ieee802154_get_llsec_params,
+	.add_llsec_key = ieee802154_add_llsec_key,
+	.del_llsec_key = ieee802154_del_llsec_key,
+	.add_seclevel = ieee802154_add_seclevel,
+	.del_seclevel = ieee802154_del_seclevel,
+	.add_device = ieee802154_add_device,
+	.del_device = ieee802154_del_device,
+	.add_devkey = ieee802154_add_devkey,
+	.del_devkey = ieee802154_del_devkey,
+#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 };
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index ed26952..7079cd3 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -367,12 +367,11 @@
 	return 0;
 }
 
-static int mac802154_header_create(struct sk_buff *skb,
-				   struct net_device *dev,
-				   unsigned short type,
-				   const void *daddr,
-				   const void *saddr,
-				   unsigned len)
+static int ieee802154_header_create(struct sk_buff *skb,
+				    struct net_device *dev,
+				    const struct ieee802154_addr *daddr,
+				    const struct ieee802154_addr *saddr,
+				    unsigned len)
 {
 	struct ieee802154_hdr hdr;
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
@@ -423,24 +422,89 @@
 	return hlen;
 }
 
+static const struct wpan_dev_header_ops ieee802154_header_ops = {
+	.create		= ieee802154_header_create,
+};
+
+/* This header create functionality assumes an 8 byte array for the
+ * source and destination pointers at maximum. To adapt this to the
+ * 802.15.4 data frame header we use extended addressing and intra-PAN
+ * connections only; the fc fields are mostly fallback handling. This
+ * provides dev_hard_header for dgram sockets.
+ */
+static int mac802154_header_create(struct sk_buff *skb,
+				   struct net_device *dev,
+				   unsigned short type,
+				   const void *daddr,
+				   const void *saddr,
+				   unsigned len)
+{
+	struct ieee802154_hdr hdr;
+	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
+	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	struct ieee802154_mac_cb cb = { };
+	int hlen;
+
+	if (!daddr)
+		return -EINVAL;
+
+	memset(&hdr.fc, 0, sizeof(hdr.fc));
+	hdr.fc.type = IEEE802154_FC_TYPE_DATA;
+	hdr.fc.ack_request = wpan_dev->ackreq;
+	hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
+
+	/* TODO currently a workaround: pass a zeroed cb block so that the
+	 * security parameter defaults are set according to the MIB.
+	 */
+	if (mac802154_set_header_security(sdata, &hdr, &cb) < 0)
+		return -EINVAL;
+
+	hdr.dest.pan_id = wpan_dev->pan_id;
+	hdr.dest.mode = IEEE802154_ADDR_LONG;
+	ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr);
+
+	hdr.source.pan_id = hdr.dest.pan_id;
+	hdr.source.mode = IEEE802154_ADDR_LONG;
+
+	if (!saddr)
+		hdr.source.extended_addr = wpan_dev->extended_addr;
+	else
+		ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr);
+
+	hlen = ieee802154_hdr_push(skb, &hdr);
+	if (hlen < 0)
+		return -EINVAL;
+
+	skb_reset_mac_header(skb);
+	skb->mac_len = hlen;
+
+	if (len > ieee802154_max_payload(&hdr))
+		return -EMSGSIZE;
+
+	return hlen;
+}
+
 static int
 mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 {
 	struct ieee802154_hdr hdr;
-	struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
 
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
 		pr_debug("malformed packet\n");
 		return 0;
 	}
 
-	*addr = hdr.source;
-	return sizeof(*addr);
+	if (hdr.source.mode == IEEE802154_ADDR_LONG) {
+		ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr);
+		return IEEE802154_EXTENDED_ADDR_LEN;
+	}
+
+	return 0;
 }
 
-static struct header_ops mac802154_header_ops = {
-	.create		= mac802154_header_create,
-	.parse		= mac802154_header_parse,
+static const struct header_ops mac802154_header_ops = {
+	.create         = mac802154_header_create,
+	.parse          = mac802154_header_parse,
 };
 
 static const struct net_device_ops mac802154_wpan_ops = {
@@ -471,9 +535,29 @@
 	dev->addr_len		= IEEE802154_EXTENDED_ADDR_LEN;
 	memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN);
 
-	dev->hard_header_len	= MAC802154_FRAME_HARD_HEADER_LEN;
-	dev->needed_tailroom	= 2 + 16; /* FCS + MIC */
-	dev->mtu		= IEEE802154_MTU;
+	/* Let hard_header_len be IEEE802154_MIN_HEADER_LEN. AF_PACKET
+	 * will not send frames without any payload, but ack frames have
+	 * no payload, so subtract one so that we can send a 3 byte frame.
+	 * The xmit callback assumes at least a hard header where the two
+	 * byte fc and the sequence field are set.
+	 */
+	dev->hard_header_len	= IEEE802154_MIN_HEADER_LEN - 1;
+	/* The auth_tag header is for security and is placed in the private
+	 * payload room of the mac frame, which sits between the payload and
+	 * the FCS field.
+	 */
+	dev->needed_tailroom	= IEEE802154_MAX_AUTH_TAG_LEN +
+				  IEEE802154_FCS_LEN;
+	/* The mtu size is the payload without the mac header in this case.
+	 * We have a dynamic length header with a minimum header length
+	 * which is hard_header_len. Here we set the mtu to the maximum
+	 * payload size, which is IEEE802154_MTU - IEEE802154_FCS_LEN -
+	 * hard_header_len. The FCS is set by hardware or by ndo_start_xmit
+	 * and the minimum mac header can be evaluated inside the driver
+	 * layer. Any part of the mac header beyond hard_header_len is
+	 * accounted as payload.
+	 */
+	dev->mtu		= IEEE802154_MTU - IEEE802154_FCS_LEN -
+				  dev->hard_header_len;
 	dev->tx_queue_len	= 300;
 	dev->flags		= IFF_NOARP | IFF_BROADCAST;
 }
@@ -513,6 +597,7 @@
 		sdata->dev->netdev_ops = &mac802154_wpan_ops;
 		sdata->dev->ml_priv = &mac802154_mlme_wpan;
 		wpan_dev->promiscuous_mode = false;
+		wpan_dev->header_ops = &ieee802154_header_ops;
 
 		mutex_init(&sdata->sec_mtx);
 
@@ -550,7 +635,8 @@
 	if (!ndev)
 		return ERR_PTR(-ENOMEM);
 
-	ndev->needed_headroom = local->hw.extra_tx_headroom;
+	ndev->needed_headroom = local->hw.extra_tx_headroom +
+				IEEE802154_MAX_HEADER_LEN;
 
 	ret = dev_alloc_name(ndev, ndev->name);
 	if (ret < 0)
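
Both header_create implementations above store extended addresses with
ieee802154_be64_to_le64()/ieee802154_le64_to_be64(): the on-air 802.15.4 byte
order is little endian, while AF_PACKET carries extended addresses big endian.
Conceptually the conversion just reverses the eight address bytes, along these
lines (a sketch, not the kernel helpers):

static void addr_reverse8(unsigned char *dst, const unsigned char *src)
{
	int i;

	/* dst[0] = src[7], dst[1] = src[6], ... dst[7] = src[0] */
	for (i = 0; i < 8; i++)
		dst[i] = src[7 - i];
}
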
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 985e939..a13d02b 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -55,7 +55,7 @@
 
 		msl = container_of(sl, struct mac802154_llsec_seclevel, level);
 		list_del(&sl->list);
-		kfree(msl);
+		kzfree(msl);
 	}
 
 	list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
@@ -72,7 +72,7 @@
 		mkey = container_of(key->key, struct mac802154_llsec_key, key);
 		list_del(&key->list);
 		llsec_key_put(mkey);
-		kfree(key);
+		kzfree(key);
 	}
 }
 
@@ -161,7 +161,7 @@
 		if (key->tfm[i])
 			crypto_free_aead(key->tfm[i]);
 
-	kfree(key);
+	kzfree(key);
 	return NULL;
 }
 
@@ -176,7 +176,7 @@
 		crypto_free_aead(key->tfm[i]);
 
 	crypto_free_blkcipher(key->tfm0);
-	kfree(key);
+	kzfree(key);
 }
 
 static struct mac802154_llsec_key*
@@ -267,7 +267,7 @@
 	return 0;
 
 fail:
-	kfree(new);
+	kzfree(new);
 	return -ENOMEM;
 }
 
@@ -347,10 +347,10 @@
 				      devkey);
 
 		list_del(&pos->list);
-		kfree(devkey);
+		kzfree(devkey);
 	}
 
-	kfree(dev);
+	kzfree(dev);
 }
 
 int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
@@ -401,6 +401,7 @@
 
 	hash_del_rcu(&pos->bucket_s);
 	hash_del_rcu(&pos->bucket_hw);
+	list_del_rcu(&pos->dev.list);
 	call_rcu(&pos->rcu, llsec_dev_free_rcu);
 
 	return 0;
@@ -680,7 +681,7 @@
 
 	rc = crypto_aead_encrypt(req);
 
-	kfree(req);
+	kzfree(req);
 
 	return rc;
 }
@@ -880,7 +881,7 @@
 
 	rc = crypto_aead_decrypt(req);
 
-	kfree(req);
+	kzfree(req);
 	skb_trim(skb, skb->len - authlen);
 
 	return rc;
@@ -920,7 +921,7 @@
 		if (!devkey)
 			list_add_rcu(&next->devkey.list, &dev->dev.keys);
 		else
-			kfree(next);
+			kzfree(next);
 
 		spin_unlock_bh(&dev->lock);
 	}
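
The kfree() -> kzfree() conversions above make sure key material and request
buffers are wiped before the memory returns to the allocator. kzfree() behaves
roughly like the sketch below; note that ksize() covers the whole slab object,
not just the originally requested size:

static void kzfree_sketch(const void *p)
{
	if (ZERO_OR_NULL_PTR(p))
		return;

	/* clear the full usable size of the object, then free it */
	memset((void *)p, 0, ksize(p));
	kfree(p);
}
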
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d1c33c1..42e9672 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -87,6 +87,10 @@
 
 	skb->dev = sdata->dev;
 
+	/* TODO this should be moved after the netif_receive_skb call,
+	 * otherwise wireshark will show a mac header with security fields
+	 * while the payload is already decrypted.
+	 */
 	rc = mac802154_llsec_decrypt(&sdata->sec, skb);
 	if (rc) {
 		pr_debug("decryption failed: %i\n", rc);
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 7ed4391..3827f35 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -77,9 +77,6 @@
 		put_unaligned_le16(crc, skb_put(skb, 2));
 	}
 
-	if (skb_cow_head(skb, local->hw.extra_tx_headroom))
-		goto err_tx;
-
 	/* Stop the netif queue on each sub_if_data object. */
 	ieee802154_stop_queue(&local->hw);
 
@@ -121,6 +118,10 @@
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	int rc;
 
+	/* TODO we should move this into the wpan_dev_hard_header and
+	 * dev_hard_header functions. Otherwise wireshark will show a mac
+	 * header with security fields while the payload is not yet
+	 * encrypted.
+	 */
 	rc = mac802154_llsec_encrypt(&sdata->sec, skb);
 	if (rc) {
 		netdev_warn(dev, "encryption failed: %i\n", rc);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index bb185a2..c70d750 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -19,36 +19,13 @@
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 #endif
+#include <net/nexthop.h>
 #include "internal.h"
 
-#define LABEL_NOT_SPECIFIED (1<<20)
-#define MAX_NEW_LABELS 2
-
-/* This maximum ha length copied from the definition of struct neighbour */
-#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
-
-enum mpls_payload_type {
-	MPT_UNSPEC, /* IPv4 or IPv6 */
-	MPT_IPV4 = 4,
-	MPT_IPV6 = 6,
-
-	/* Other types not implemented:
-	 *  - Pseudo-wire with or without control word (RFC4385)
-	 *  - GAL (RFC5586)
-	 */
-};
-
-struct mpls_route { /* next hop label forwarding entry */
-	struct net_device __rcu *rt_dev;
-	struct rcu_head		rt_rcu;
-	u32			rt_label[MAX_NEW_LABELS];
-	u8			rt_protocol; /* routing protocol that set this entry */
-	u8                      rt_payload_type;
-	u8			rt_labels;
-	u8			rt_via_alen;
-	u8			rt_via_table;
-	u8			rt_via[0];
-};
+/* Maximum number of labels to look ahead at when selecting a path of
+ * a multipath route
+ */
+#define MAX_MP_SELECT_LABELS 4
 
 static int zero = 0;
 static int label_limit = (1 << 20) - 1;
@@ -80,10 +57,24 @@
 }
 EXPORT_SYMBOL_GPL(mpls_output_possible);
 
-static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
+static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
+{
+	u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
+	int nh_index = nh - rt->rt_nh;
+
+	return nh0_via + rt->rt_max_alen * nh_index;
+}
+
+static const u8 *mpls_nh_via(const struct mpls_route *rt,
+			     const struct mpls_nh *nh)
+{
+	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
+}
+
+static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
 {
 	/* The size of the layer 2.5 labels to be added for this route */
-	return rt->rt_labels * sizeof(struct mpls_shim_hdr);
+	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
 }
 
 unsigned int mpls_dev_mtu(const struct net_device *dev)
@@ -105,6 +96,80 @@
 }
 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
+static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+					     struct sk_buff *skb, bool bos)
+{
+	struct mpls_entry_decoded dec;
+	struct mpls_shim_hdr *hdr;
+	bool eli_seen = false;
+	int label_index;
+	int nh_index = 0;
+	u32 hash = 0;
+
+	/* No need to look further into the packet if there's only
+	 * one path
+	 */
+	if (rt->rt_nhn == 1)
+		goto out;
+
+	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+	     label_index++) {
+		if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+			break;
+
+		/* Read and decode the current label */
+		hdr = mpls_hdr(skb) + label_index;
+		dec = mpls_entry_decode(hdr);
+
+		/* RFC6790 - reserved labels MUST NOT be used as keys
+		 * for the load-balancing function
+		 */
+		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
+			hash = jhash_1word(dec.label, hash);
+
+			/* The entropy label follows the entropy label
+			 * indicator, so this means that the entropy
+			 * label was just added to the hash - no need to
+			 * go any deeper either in the label stack or in the
+			 * payload
+			 */
+			if (eli_seen)
+				break;
+		} else if (dec.label == MPLS_LABEL_ENTROPY) {
+			eli_seen = true;
+		}
+
+		bos = dec.bos;
+		if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+					 sizeof(struct iphdr))) {
+			const struct iphdr *v4hdr;
+
+			v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+						       label_index);
+			if (v4hdr->version == 4) {
+				hash = jhash_3words(ntohl(v4hdr->saddr),
+						    ntohl(v4hdr->daddr),
+						    v4hdr->protocol, hash);
+			} else if (v4hdr->version == 6 &&
+				pskb_may_pull(skb, sizeof(*hdr) * label_index +
+					      sizeof(struct ipv6hdr))) {
+				const struct ipv6hdr *v6hdr;
+
+				v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+								label_index);
+
+				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+				hash = jhash_1word(v6hdr->nexthdr, hash);
+			}
+		}
+	}
+
+	nh_index = hash % rt->rt_nhn;
+out:
+	return &rt->rt_nh[nh_index];
+}
+
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
 			struct mpls_entry_decoded dec)
 {
@@ -159,6 +224,7 @@
 	struct net *net = dev_net(dev);
 	struct mpls_shim_hdr *hdr;
 	struct mpls_route *rt;
+	struct mpls_nh *nh;
 	struct mpls_entry_decoded dec;
 	struct net_device *out_dev;
 	struct mpls_dev *mdev;
@@ -196,8 +262,12 @@
 	if (!rt)
 		goto drop;
 
+	nh = mpls_select_multipath(rt, skb, dec.bos);
+	if (!nh)
+		goto drop;
+
 	/* Find the output device */
-	out_dev = rcu_dereference(rt->rt_dev);
+	out_dev = rcu_dereference(nh->nh_dev);
 	if (!mpls_output_possible(out_dev))
 		goto drop;
 
@@ -212,7 +282,7 @@
 	dec.ttl -= 1;
 
 	/* Verify the destination can hold the packet */
-	new_header_size = mpls_rt_header_size(rt);
+	new_header_size = mpls_nh_header_size(nh);
 	mtu = mpls_dev_mtu(out_dev);
 	if (mpls_pkt_too_big(skb, mtu - new_header_size))
 		goto drop;
@@ -240,13 +310,14 @@
 		/* Push the new labels */
 		hdr = mpls_hdr(skb);
 		bos = dec.bos;
-		for (i = rt->rt_labels - 1; i >= 0; i--) {
-			hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
+		for (i = nh->nh_labels - 1; i >= 0; i--) {
+			hdr[i] = mpls_entry_encode(nh->nh_label[i],
+						   dec.ttl, 0, bos);
 			bos = false;
 		}
 	}
 
-	err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
+	err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb);
 	if (err)
 		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
 				    __func__, err);
@@ -270,24 +341,33 @@
 struct mpls_route_config {
 	u32			rc_protocol;
 	u32			rc_ifindex;
-	u16			rc_via_table;
-	u16			rc_via_alen;
+	u8			rc_via_table;
+	u8			rc_via_alen;
 	u8			rc_via[MAX_VIA_ALEN];
 	u32			rc_label;
-	u32			rc_output_labels;
+	u8			rc_output_labels;
 	u32			rc_output_label[MAX_NEW_LABELS];
 	u32			rc_nlflags;
 	enum mpls_payload_type	rc_payload_type;
 	struct nl_info		rc_nlinfo;
+	struct rtnexthop	*rc_mp;
+	int			rc_mp_len;
 };
 
-static struct mpls_route *mpls_rt_alloc(size_t alen)
+static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
 {
+	u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
 	struct mpls_route *rt;
 
-	rt = kzalloc(sizeof(*rt) + alen, GFP_KERNEL);
-	if (rt)
-		rt->rt_via_alen = alen;
+	rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
+			   VIA_ALEN_ALIGN) +
+		     num_nh * max_alen_aligned,
+		     GFP_KERNEL);
+	if (rt) {
+		rt->rt_nhn = num_nh;
+		rt->rt_max_alen = max_alen_aligned;
+	}
+
 	return rt;
 }
 
@@ -312,25 +392,22 @@
 }
 
 static void mpls_route_update(struct net *net, unsigned index,
-			      struct net_device *dev, struct mpls_route *new,
+			      struct mpls_route *new,
 			      const struct nl_info *info)
 {
 	struct mpls_route __rcu **platform_label;
-	struct mpls_route *rt, *old = NULL;
+	struct mpls_route *rt;
 
 	ASSERT_RTNL();
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	rt = rtnl_dereference(platform_label[index]);
-	if (!dev || (rt && (rtnl_dereference(rt->rt_dev) == dev))) {
-		rcu_assign_pointer(platform_label[index], new);
-		old = rt;
-	}
+	rcu_assign_pointer(platform_label[index], new);
 
-	mpls_notify_route(net, index, old, new, info);
+	mpls_notify_route(net, index, rt, new, info);
 
 	/* If we removed a route free it now */
-	mpls_rt_free(old);
+	mpls_rt_free(rt);
 }
 
 static unsigned find_free_label(struct net *net)
@@ -350,7 +427,8 @@
 }
 
 #if IS_ENABLED(CONFIG_INET)
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+					      const void *addr)
 {
 	struct net_device *dev;
 	struct rtable *rt;
@@ -369,14 +447,16 @@
 	return dev;
 }
 #else
-static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet_fib_lookup_dev(struct net *net,
+					      const void *addr)
 {
 	return ERR_PTR(-EAFNOSUPPORT);
 }
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+					       const void *addr)
 {
 	struct net_device *dev;
 	struct dst_entry *dst;
@@ -399,47 +479,234 @@
 	return dev;
 }
 #else
-static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+static struct net_device *inet6_fib_lookup_dev(struct net *net,
+					       const void *addr)
 {
 	return ERR_PTR(-EAFNOSUPPORT);
 }
 #endif
 
 static struct net_device *find_outdev(struct net *net,
-				      struct mpls_route_config *cfg)
+				      struct mpls_route *rt,
+				      struct mpls_nh *nh, int oif)
 {
 	struct net_device *dev = NULL;
 
-	if (!cfg->rc_ifindex) {
-		switch (cfg->rc_via_table) {
+	if (!oif) {
+		switch (nh->nh_via_table) {
 		case NEIGH_ARP_TABLE:
-			dev = inet_fib_lookup_dev(net, cfg->rc_via);
+			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
 			break;
 		case NEIGH_ND_TABLE:
-			dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
 			break;
 		case NEIGH_LINK_TABLE:
 			break;
 		}
 	} else {
-		dev = dev_get_by_index(net, cfg->rc_ifindex);
+		dev = dev_get_by_index(net, oif);
 	}
 
 	if (!dev)
 		return ERR_PTR(-ENODEV);
 
+	/* The caller is holding rtnl anyway, so release the dev reference */
+	dev_put(dev);
+
 	return dev;
 }
 
+static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
+			      struct mpls_nh *nh, int oif)
+{
+	struct net_device *dev = NULL;
+	int err = -ENODEV;
+
+	dev = find_outdev(net, rt, nh, oif);
+	if (IS_ERR(dev)) {
+		err = PTR_ERR(dev);
+		dev = NULL;
+		goto errout;
+	}
+
+	/* Ensure this is a supported device */
+	err = -EINVAL;
+	if (!mpls_dev_get(dev))
+		goto errout;
+
+	RCU_INIT_POINTER(nh->nh_dev, dev);
+
+	return 0;
+
+errout:
+	return err;
+}
+
+static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
+				  struct mpls_route *rt)
+{
+	struct net *net = cfg->rc_nlinfo.nl_net;
+	struct mpls_nh *nh = rt->rt_nh;
+	int err;
+	int i;
+
+	if (!nh)
+		return -ENOMEM;
+
+	err = -EINVAL;
+	/* Ensure only a supported number of labels are present */
+	if (cfg->rc_output_labels > MAX_NEW_LABELS)
+		goto errout;
+
+	nh->nh_labels = cfg->rc_output_labels;
+	for (i = 0; i < nh->nh_labels; i++)
+		nh->nh_label[i] = cfg->rc_output_label[i];
+
+	nh->nh_via_table = cfg->rc_via_table;
+	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
+	nh->nh_via_alen = cfg->rc_via_alen;
+
+	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
+	if (err)
+		goto errout;
+
+	return 0;
+
+errout:
+	return err;
+}
+
+static int mpls_nh_build(struct net *net, struct mpls_route *rt,
+			 struct mpls_nh *nh, int oif,
+			 struct nlattr *via, struct nlattr *newdst)
+{
+	int err = -ENOMEM;
+
+	if (!nh)
+		goto errout;
+
+	if (newdst) {
+		err = nla_get_labels(newdst, MAX_NEW_LABELS,
+				     &nh->nh_labels, nh->nh_label);
+		if (err)
+			goto errout;
+	}
+
+	err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
+			  __mpls_nh_via(rt, nh));
+	if (err)
+		goto errout;
+
+	err = mpls_nh_assign_dev(net, rt, nh, oif);
+	if (err)
+		goto errout;
+
+	return 0;
+
+errout:
+	return err;
+}
+
+static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
+			       u8 cfg_via_alen, u8 *max_via_alen)
+{
+	int nhs = 0;
+	int remaining = len;
+
+	if (!rtnh) {
+		*max_via_alen = cfg_via_alen;
+		return 1;
+	}
+
+	*max_via_alen = 0;
+
+	while (rtnh_ok(rtnh, remaining)) {
+		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+		int attrlen;
+
+		attrlen = rtnh_attrlen(rtnh);
+		nla = nla_find(attrs, attrlen, RTA_VIA);
+		if (nla && nla_len(nla) >=
+		    offsetof(struct rtvia, rtvia_addr)) {
+			int via_alen = nla_len(nla) -
+				offsetof(struct rtvia, rtvia_addr);
+
+			if (via_alen <= MAX_VIA_ALEN)
+				*max_via_alen = max_t(u16, *max_via_alen,
+						      via_alen);
+		}
+
+		nhs++;
+		rtnh = rtnh_next(rtnh, &remaining);
+	}
+
+	/* leftover implies invalid nexthop configuration, discard it */
+	return remaining > 0 ? 0 : nhs;
+}
+
+static int mpls_nh_build_multi(struct mpls_route_config *cfg,
+			       struct mpls_route *rt)
+{
+	struct rtnexthop *rtnh = cfg->rc_mp;
+	struct nlattr *nla_via, *nla_newdst;
+	int remaining = cfg->rc_mp_len;
+	int nhs = 0;
+	int err = 0;
+
+	change_nexthops(rt) {
+		int attrlen;
+
+		nla_via = NULL;
+		nla_newdst = NULL;
+
+		err = -EINVAL;
+		if (!rtnh_ok(rtnh, remaining))
+			goto errout;
+
+		/* neither weighted multipath nor any flags
+		 * are supported
+		 */
+		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
+			goto errout;
+
+		attrlen = rtnh_attrlen(rtnh);
+		if (attrlen > 0) {
+			struct nlattr *attrs = rtnh_attrs(rtnh);
+
+			nla_via = nla_find(attrs, attrlen, RTA_VIA);
+			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
+		}
+
+		if (!nla_via)
+			goto errout;
+
+		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
+				    rtnh->rtnh_ifindex, nla_via,
+				    nla_newdst);
+		if (err)
+			goto errout;
+
+		rtnh = rtnh_next(rtnh, &remaining);
+		nhs++;
+	} endfor_nexthops(rt);
+
+	rt->rt_nhn = nhs;
+
+	return 0;
+
+errout:
+	return err;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
 	struct mpls_route __rcu **platform_label;
 	struct net *net = cfg->rc_nlinfo.nl_net;
-	struct net_device *dev = NULL;
 	struct mpls_route *rt, *old;
-	unsigned index;
-	int i;
 	int err = -EINVAL;
+	u8 max_via_alen;
+	unsigned index;
+	int nhs;
 
 	index = cfg->rc_label;
 
@@ -457,27 +724,6 @@
 	if (index >= net->mpls.platform_labels)
 		goto errout;
 
-	/* Ensure only a supported number of labels are present */
-	if (cfg->rc_output_labels > MAX_NEW_LABELS)
-		goto errout;
-
-	dev = find_outdev(net, cfg);
-	if (IS_ERR(dev)) {
-		err = PTR_ERR(dev);
-		dev = NULL;
-		goto errout;
-	}
-
-	/* Ensure this is a supported device */
-	err = -EINVAL;
-	if (!mpls_dev_get(dev))
-		goto errout;
-
-	err = -EINVAL;
-	if ((cfg->rc_via_table == NEIGH_LINK_TABLE) &&
-	    (dev->addr_len != cfg->rc_via_alen))
-		goto errout;
-
 	/* Append makes no sense with mpls */
 	err = -EOPNOTSUPP;
 	if (cfg->rc_nlflags & NLM_F_APPEND)
@@ -497,28 +743,34 @@
 	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
 		goto errout;
 
+	err = -EINVAL;
+	nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
+				  cfg->rc_via_alen, &max_via_alen);
+	if (nhs == 0)
+		goto errout;
+
 	err = -ENOMEM;
-	rt = mpls_rt_alloc(cfg->rc_via_alen);
+	rt = mpls_rt_alloc(nhs, max_via_alen);
 	if (!rt)
 		goto errout;
 
-	rt->rt_labels = cfg->rc_output_labels;
-	for (i = 0; i < rt->rt_labels; i++)
-		rt->rt_label[i] = cfg->rc_output_label[i];
 	rt->rt_protocol = cfg->rc_protocol;
-	RCU_INIT_POINTER(rt->rt_dev, dev);
 	rt->rt_payload_type = cfg->rc_payload_type;
-	rt->rt_via_table = cfg->rc_via_table;
-	memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
 
-	mpls_route_update(net, index, NULL, rt, &cfg->rc_nlinfo);
+	if (cfg->rc_mp)
+		err = mpls_nh_build_multi(cfg, rt);
+	else
+		err = mpls_nh_build_from_cfg(cfg, rt);
+	if (err)
+		goto freert;
 
-	dev_put(dev);
+	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
+
 	return 0;
 
+freert:
+	mpls_rt_free(rt);
 errout:
-	if (dev)
-		dev_put(dev);
 	return err;
 }
 
@@ -538,7 +790,7 @@
 	if (index >= net->mpls.platform_labels)
 		goto errout;
 
-	mpls_route_update(net, index, NULL, NULL, &cfg->rc_nlinfo);
+	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
 
 	err = 0;
 errout:
@@ -635,9 +887,11 @@
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
 		if (!rt)
 			continue;
-		if (rtnl_dereference(rt->rt_dev) != dev)
-			continue;
-		rt->rt_dev = NULL;
+		for_nexthops(rt) {
+			if (rtnl_dereference(nh->nh_dev) != dev)
+				continue;
+			nh->nh_dev = NULL;
+		} endfor_nexthops(rt);
 	}
 
 	mdev = mpls_dev_get(dev);
@@ -736,7 +990,7 @@
 EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
-		   u32 max_labels, u32 *labels, u32 label[])
+		   u32 max_labels, u8 *labels, u32 label[])
 {
 	unsigned len = nla_len(nla);
 	unsigned nla_labels;
@@ -781,6 +1035,48 @@
 }
 EXPORT_SYMBOL_GPL(nla_get_labels);
 
+int nla_get_via(const struct nlattr *nla, u8 *via_alen,
+		u8 *via_table, u8 via_addr[])
+{
+	struct rtvia *via = nla_data(nla);
+	int err = -EINVAL;
+	int alen;
+
+	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+		goto errout;
+	alen = nla_len(nla) -
+			offsetof(struct rtvia, rtvia_addr);
+	if (alen > MAX_VIA_ALEN)
+		goto errout;
+
+	/* Validate the address family */
+	switch (via->rtvia_family) {
+	case AF_PACKET:
+		*via_table = NEIGH_LINK_TABLE;
+		break;
+	case AF_INET:
+		*via_table = NEIGH_ARP_TABLE;
+		if (alen != 4)
+			goto errout;
+		break;
+	case AF_INET6:
+		*via_table = NEIGH_ND_TABLE;
+		if (alen != 16)
+			goto errout;
+		break;
+	default:
+		/* Unsupported address family */
+		goto errout;
+	}
+
+	memcpy(via_addr, via->rtvia_addr, alen);
+	*via_alen = alen;
+	err = 0;
+
+errout:
+	return err;
+}
+
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
 			       struct mpls_route_config *cfg)
 {
@@ -844,7 +1140,7 @@
 			break;
 		case RTA_DST:
 		{
-			u32 label_count;
+			u8 label_count;
 			if (nla_get_labels(nla, 1, &label_count,
 					   &cfg->rc_label))
 				goto errout;
@@ -857,35 +1153,15 @@
 		}
 		case RTA_VIA:
 		{
-			struct rtvia *via = nla_data(nla);
-			if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
+			if (nla_get_via(nla, &cfg->rc_via_alen,
+					&cfg->rc_via_table, cfg->rc_via))
 				goto errout;
-			cfg->rc_via_alen   = nla_len(nla) -
-				offsetof(struct rtvia, rtvia_addr);
-			if (cfg->rc_via_alen > MAX_VIA_ALEN)
-				goto errout;
-
-			/* Validate the address family */
-			switch(via->rtvia_family) {
-			case AF_PACKET:
-				cfg->rc_via_table = NEIGH_LINK_TABLE;
-				break;
-			case AF_INET:
-				cfg->rc_via_table = NEIGH_ARP_TABLE;
-				if (cfg->rc_via_alen != 4)
-					goto errout;
-				break;
-			case AF_INET6:
-				cfg->rc_via_table = NEIGH_ND_TABLE;
-				if (cfg->rc_via_alen != 16)
-					goto errout;
-				break;
-			default:
-				/* Unsupported address family */
-				goto errout;
-			}
-
-			memcpy(cfg->rc_via, via->rtvia_addr, cfg->rc_via_alen);
+			break;
+		}
+		case RTA_MULTIPATH:
+		{
+			cfg->rc_mp = nla_data(nla);
+			cfg->rc_mp_len = nla_len(nla);
 			break;
 		}
 		default:
@@ -946,16 +1222,52 @@
 	rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
 
-	if (rt->rt_labels &&
-	    nla_put_labels(skb, RTA_NEWDST, rt->rt_labels, rt->rt_label))
-		goto nla_put_failure;
-	if (nla_put_via(skb, rt->rt_via_table, rt->rt_via, rt->rt_via_alen))
-		goto nla_put_failure;
-	dev = rtnl_dereference(rt->rt_dev);
-	if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
-		goto nla_put_failure;
 	if (nla_put_labels(skb, RTA_DST, 1, &label))
 		goto nla_put_failure;
+	if (rt->rt_nhn == 1) {
+		const struct mpls_nh *nh = rt->rt_nh;
+
+		if (nh->nh_labels &&
+		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
+				   nh->nh_label))
+			goto nla_put_failure;
+		if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
+				nh->nh_via_alen))
+			goto nla_put_failure;
+		dev = rtnl_dereference(nh->nh_dev);
+		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
+			goto nla_put_failure;
+	} else {
+		struct rtnexthop *rtnh;
+		struct nlattr *mp;
+
+		mp = nla_nest_start(skb, RTA_MULTIPATH);
+		if (!mp)
+			goto nla_put_failure;
+
+		for_nexthops(rt) {
+			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+			if (!rtnh)
+				goto nla_put_failure;
+
+			dev = rtnl_dereference(nh->nh_dev);
+			if (dev)
+				rtnh->rtnh_ifindex = dev->ifindex;
+			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
+							    nh->nh_labels,
+							    nh->nh_label))
+				goto nla_put_failure;
+			if (nla_put_via(skb, nh->nh_via_table,
+					mpls_nh_via(rt, nh),
+					nh->nh_via_alen))
+				goto nla_put_failure;
+
+			/* length of rtnetlink header + attributes */
+			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+		} endfor_nexthops(rt);
+
+		nla_nest_end(skb, mp);
+	}
 
 	nlmsg_end(skb, nlh);
 	return 0;
@@ -1000,12 +1312,30 @@
 {
 	size_t payload =
 		NLMSG_ALIGN(sizeof(struct rtmsg))
-		+ nla_total_size(2 + rt->rt_via_alen)	/* RTA_VIA */
 		+ nla_total_size(4);			/* RTA_DST */
-	if (rt->rt_labels)				/* RTA_NEWDST */
-		payload += nla_total_size(rt->rt_labels * 4);
-	if (rt->rt_dev)					/* RTA_OIF */
-		payload += nla_total_size(4);
+
+	if (rt->rt_nhn == 1) {
+		struct mpls_nh *nh = rt->rt_nh;
+
+		if (nh->nh_dev)
+			payload += nla_total_size(4); /* RTA_OIF */
+		payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */
+		if (nh->nh_labels) /* RTA_NEWDST */
+			payload += nla_total_size(nh->nh_labels * 4);
+	} else {
+		/* each nexthop is packed in an attribute */
+		size_t nhsize = 0;
+
+		for_nexthops(rt) {
+			nhsize += nla_total_size(sizeof(struct rtnexthop));
+			nhsize += nla_total_size(2 + nh->nh_via_alen);
+			if (nh->nh_labels)
+				nhsize += nla_total_size(nh->nh_labels * 4);
+		} endfor_nexthops(rt);
+		/* nested attribute */
+		payload += nla_total_size(nhsize);
+	}
+
 	return payload;
 }
 
@@ -1057,25 +1387,29 @@
 	/* In case the predefined labels need to be populated */
 	if (limit > MPLS_LABEL_IPV4NULL) {
 		struct net_device *lo = net->loopback_dev;
-		rt0 = mpls_rt_alloc(lo->addr_len);
+		rt0 = mpls_rt_alloc(1, lo->addr_len);
 		if (!rt0)
 			goto nort0;
-		RCU_INIT_POINTER(rt0->rt_dev, lo);
+		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
 		rt0->rt_protocol = RTPROT_KERNEL;
 		rt0->rt_payload_type = MPT_IPV4;
-		rt0->rt_via_table = NEIGH_LINK_TABLE;
-		memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
+		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+		rt0->rt_nh->nh_via_alen = lo->addr_len;
+		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
+		       lo->addr_len);
 	}
 	if (limit > MPLS_LABEL_IPV6NULL) {
 		struct net_device *lo = net->loopback_dev;
-		rt2 = mpls_rt_alloc(lo->addr_len);
+		rt2 = mpls_rt_alloc(1, lo->addr_len);
 		if (!rt2)
 			goto nort2;
-		RCU_INIT_POINTER(rt2->rt_dev, lo);
+		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
 		rt2->rt_protocol = RTPROT_KERNEL;
 		rt2->rt_payload_type = MPT_IPV6;
-		rt2->rt_via_table = NEIGH_LINK_TABLE;
-		memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
+		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
+		rt2->rt_nh->nh_via_alen = lo->addr_len;
+		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
+		       lo->addr_len);
 	}
 
 	rtnl_lock();
@@ -1085,7 +1419,7 @@
 
 	/* Free any labels beyond the new table */
 	for (index = limit; index < old_limit; index++)
-		mpls_route_update(net, index, NULL, NULL, NULL);
+		mpls_route_update(net, index, NULL, NULL);
 
 	/* Copy over the old labels */
 	cp_size = size;
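
The multipath selection added above boils down to mixing the usable labels
(and, at the bottom of stack, the inner IP addresses and protocol) into a
jhash, then indexing the nexthop array with hash % rt_nhn. Stripped to the
label-only case (a sketch; the real function also bounds the lookahead with
MAX_MP_SELECT_LABELS and guards every read with pskb_may_pull()):

#include <linux/jhash.h>
#include <linux/mpls.h>

static unsigned int select_nh_index(const u32 *label, int n_labels,
				    unsigned int n_nexthops)
{
	u32 hash = 0;
	int i;

	for (i = 0; i < n_labels; i++) {
		/* RFC6790: reserved labels must not feed the hash */
		if (label[i] >= MPLS_LABEL_FIRST_UNRESERVED)
			hash = jhash_1word(label[i], hash);
	}

	/* caller guarantees n_nexthops >= 1 */
	return hash % n_nexthops;
}
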
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 2681a4b..bde52ce 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -21,6 +21,76 @@
 
 struct sk_buff;
 
+#define LABEL_NOT_SPECIFIED (1 << 20)
+#define MAX_NEW_LABELS 2
+
+/* This maximum hardware address length is copied from the definition
+ * of struct neighbour.
+ */
+#define VIA_ALEN_ALIGN sizeof(unsigned long)
+#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, VIA_ALEN_ALIGN))
+
+enum mpls_payload_type {
+	MPT_UNSPEC, /* IPv4 or IPv6 */
+	MPT_IPV4 = 4,
+	MPT_IPV6 = 6,
+
+	/* Other types not implemented:
+	 *  - Pseudo-wire with or without control word (RFC4385)
+	 *  - GAL (RFC5586)
+	 */
+};
+
+struct mpls_nh { /* next hop label forwarding entry */
+	struct net_device __rcu *nh_dev;
+	u32			nh_label[MAX_NEW_LABELS];
+	u8			nh_labels;
+	u8			nh_via_alen;
+	u8			nh_via_table;
+};
+
+/* The route, nexthops and vias are stored together in the same memory
+ * block:
+ *
+ * +----------------------+
+ * | mpls_route           |
+ * +----------------------+
+ * | mpls_nh 0            |
+ * +----------------------+
+ * | ...                  |
+ * +----------------------+
+ * | mpls_nh n-1          |
+ * +----------------------+
+ * | alignment padding    |
+ * +----------------------+
+ * | via[rt_max_alen] 0   |
+ * +----------------------+
+ * | ...                  |
+ * +----------------------+
+ * | via[rt_max_alen] n-1 |
+ * +----------------------+
+ */
+struct mpls_route { /* next hop label forwarding entry */
+	struct rcu_head		rt_rcu;
+	u8			rt_protocol;
+	u8			rt_payload_type;
+	u8			rt_max_alen;
+	unsigned int		rt_nhn;
+	struct mpls_nh		rt_nh[0];
+};
+
+#define for_nexthops(rt) {						\
+	int nhsel; struct mpls_nh *nh;			\
+	for (nhsel = 0, nh = (rt)->rt_nh;				\
+	     nhsel < (rt)->rt_nhn;					\
+	     nh++, nhsel++)
+
+#define change_nexthops(rt) {						\
+	int nhsel; struct mpls_nh *nh;				\
+	for (nhsel = 0,	nh = (struct mpls_nh *)((rt)->rt_nh);	\
+	     nhsel < (rt)->rt_nhn;					\
+	     nh++, nhsel++)
+
+#define endfor_nexthops(rt) }
+
 static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
 {
 	return (struct mpls_shim_hdr *)skb_network_header(skb);
@@ -52,8 +122,10 @@
 
 int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
 		   const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
 		   u32 label[]);
+int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
+		u8 via[]);
 bool mpls_output_possible(const struct net_device *dev);
 unsigned int mpls_dev_mtu(const struct net_device *dev);
 bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
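
The layout diagram in internal.h above implies a single allocation: the
mpls_nh array follows struct mpls_route directly, and the via addresses
live in a flat byte area behind it, one rt_max_alen-sized slot per
nexthop. Below is a minimal sketch of an accessor consistent with that
diagram -- illustrative only, with made-up names (the in-tree helper,
__mpls_nh_via(), is defined in net/mpls/af_mpls.c and may differ in
detail):

	/* The via area starts after the nexthop array, rounded up to
	 * VIA_ALEN_ALIGN; each nexthop owns rt_max_alen bytes of it.
	 */
	static inline u8 *mpls_nh_via_sketch(struct mpls_route *rt,
					     struct mpls_nh *nh)
	{
		u8 *via0 = (u8 *)rt +
			   ALIGN(sizeof(*rt) + rt->rt_nhn * sizeof(*nh),
				 VIA_ALEN_ALIGN);

		return via0 + (nh - rt->rt_nh) * rt->rt_max_alen;
	}

	/* The iteration macros pair up as a statement block: */
	static void dump_nexthops_sketch(struct mpls_route *rt)
	{
		for_nexthops(rt) {
			pr_debug("nh %d: %u labels, via alen %u\n", nhsel,
				 nh->nh_labels, nh->nh_via_alen);
		} endfor_nexthops(rt);
	}
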
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 21e70bc..67591ae 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -37,7 +37,7 @@
 	return en->labels * sizeof(struct mpls_shim_hdr);
 }
 
-int mpls_output(struct sock *sk, struct sk_buff *skb)
+int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct mpls_iptunnel_encap *tun_encap_info;
 	struct mpls_shim_hdr *hdr;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3e1b4ab..e22349e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -354,7 +354,7 @@
 	select NETFILTER_NETLINK
 	depends on NF_CT_NETLINK
 	depends on NETFILTER_NETLINK_QUEUE
-	depends on NETFILTER_NETLINK_QUEUE_CT
+	depends on NETFILTER_NETLINK_GLUE_CT
 	depends on NETFILTER_ADVANCED
 	help
 	  This option enables the user-space connection tracking helpers
@@ -362,13 +362,14 @@
 
 	  If unsure, say `N'.
 
-config NETFILTER_NETLINK_QUEUE_CT
-        bool "NFQUEUE integration with Connection Tracking"
-        default n
-        depends on NETFILTER_NETLINK_QUEUE
+config NETFILTER_NETLINK_GLUE_CT
+	bool "NFQUEUE and NFLOG integration with Connection Tracking"
+	default n
+	depends on (NETFILTER_NETLINK_QUEUE || NETFILTER_NETLINK_LOG) && NF_CT_NETLINK
 	help
-	  If this option is enabled, NFQUEUE can include Connection Tracking
-	  information together with the packet is the enqueued via NFNETLINK.
+	  If this option is enabled, NFQUEUE and NFLOG can include
+	  Connection Tracking information together with the packet that
+	  is enqueued via NFNETLINK.
 
 config NF_NAT
 	tristate
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 70d026d..7638c36 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -10,8 +10,6 @@
 
 obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
 obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
-nfnetlink_queue-y := nfnetlink_queue_core.o
-nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 2e90733..f39276d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -152,6 +152,8 @@
 #endif
 	synchronize_net();
 	nf_queue_nf_hook_drop(net, &entry->ops);
+	/* other cpu might still process nfqueue verdict that used reg */
+	synchronize_net();
 	kfree(entry);
 }
 EXPORT_SYMBOL(nf_unregister_net_hook);
@@ -313,8 +315,6 @@
 		int err = nf_queue(skb, elem, state,
 				   verdict >> NF_VERDICT_QBITS);
 		if (err < 0) {
-			if (err == -ECANCELED)
-				goto next_hook;
 			if (err == -ESRCH &&
 			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
 				goto next_hook;
@@ -348,6 +348,12 @@
 }
 EXPORT_SYMBOL(skb_make_writable);
 
+/* This needs to be compiled in any case to avoid dependencies between the
+ * nfnetlink_queue code and nf_conntrack.
+ */
+struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfnl_ct_hook);
+
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
@@ -385,9 +391,6 @@
 }
 EXPORT_SYMBOL(nf_conntrack_destroy);
 
-struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
-EXPORT_SYMBOL_GPL(nfq_ct_hook);
-
 /* Built-in default zone used e.g. by modules. */
 const struct nf_conntrack_zone nf_ct_zone_dflt = {
 	.id	= NF_CT_DEFAULT_ZONE_ID,
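
The nfnl_ct_hook added above is the usual RCU'd hook-pointer pattern:
the conntrack glue publishes an ops table when it loads, and the
nfnetlink_queue/nfnetlink_log side dereferences it under rcu_read_lock(),
so neither module has to link against the other. The extra
synchronize_net() in nf_unregister_net_hook() follows the same
discipline -- no CPU may still be running a verdict path that uses the
hook entry by the time it is freed. A hedged sketch of the consumer
side (the call through the table is elided, since the struct layout is
not shown in this hunk):

	static void nfnl_ct_use_sketch(void)
	{
		struct nfnl_ct_hook *ct_hook;

		rcu_read_lock();
		ct_hook = rcu_dereference(nfnl_ct_hook);
		if (ct_hook) {
			/* glue module is loaded; calls through the ops
			 * table are safe until rcu_read_unlock().
			 */
		}
		rcu_read_unlock();
	}
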
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a1fe537..5a30ce6 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -297,7 +297,7 @@
 	      ip_set_timeout_expired(ext_timeout(n, set))))
 		n =  NULL;
 
-	e = kzalloc(set->dsize, GFP_KERNEL);
+	e = kzalloc(set->dsize, GFP_ATOMIC);
 	if (!e)
 		return -ENOMEM;
 	e->id = d->id;
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index dfd7b65..0328f72 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -75,7 +75,7 @@
  *	Allocate/initialize app incarnation and register it in proto apps.
  */
 static int
-ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+ip_vs_app_inc_new(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 		  __u16 port)
 {
 	struct ip_vs_protocol *pp;
@@ -107,7 +107,7 @@
 		}
 	}
 
-	ret = pp->register_app(net, inc);
+	ret = pp->register_app(ipvs, inc);
 	if (ret)
 		goto out;
 
@@ -127,7 +127,7 @@
  *	Release app incarnation
  */
 static void
-ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_protocol *pp;
 
@@ -135,7 +135,7 @@
 		return;
 
 	if (pp->unregister_app)
-		pp->unregister_app(net, inc);
+		pp->unregister_app(ipvs, inc);
 
 	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
 		  pp->name, inc->name, ntohs(inc->port));
@@ -175,14 +175,14 @@
  *	Register an application incarnation in protocol applications
  */
 int
-register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto,
 		       __u16 port)
 {
 	int result;
 
 	mutex_lock(&__ip_vs_app_mutex);
 
-	result = ip_vs_app_inc_new(net, app, proto, port);
+	result = ip_vs_app_inc_new(ipvs, app, proto, port);
 
 	mutex_unlock(&__ip_vs_app_mutex);
 
@@ -191,15 +191,11 @@
 
 
 /* Register application for netns */
-struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app)
+struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_app *a;
 	int err = 0;
 
-	if (!ipvs)
-		return ERR_PTR(-ENOENT);
-
 	mutex_lock(&__ip_vs_app_mutex);
 
 	list_for_each_entry(a, &ipvs->app_list, a_list) {
@@ -230,21 +226,17 @@
  *	We are sure there are no app incarnations attached to services
  *	Caller should use synchronize_rcu() or rcu_barrier()
  */
-void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
+void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_app *a, *anxt, *inc, *nxt;
 
-	if (!ipvs)
-		return;
-
 	mutex_lock(&__ip_vs_app_mutex);
 
 	list_for_each_entry_safe(a, anxt, &ipvs->app_list, a_list) {
 		if (app && strcmp(app->name, a->name))
 			continue;
 		list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
-			ip_vs_app_inc_release(net, inc);
+			ip_vs_app_inc_release(ipvs, inc);
 		}
 
 		list_del(&a->a_list);
@@ -611,17 +603,19 @@
 };
 #endif
 
-int __net_init ip_vs_app_net_init(struct net *net)
+int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
 	INIT_LIST_HEAD(&ipvs->app_list);
 	proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
 	return 0;
 }
 
-void __net_exit ip_vs_app_net_cleanup(struct net *net)
+void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
 {
-	unregister_ip_vs_app(net, NULL /* all */);
+	struct net *net = ipvs->net;
+
+	unregister_ip_vs_app(ipvs, NULL /* all */);
 	remove_proc_entry("ip_vs_app", net->proc_net);
 }
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index f71b314..85ca189 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -108,7 +108,7 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
+static unsigned int ip_vs_conn_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
 				       const union nf_inet_addr *addr,
 				       __be16 port)
 {
@@ -116,11 +116,11 @@
 	if (af == AF_INET6)
 		return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
 				    (__force u32)port, proto, ip_vs_conn_rnd) ^
-			((size_t)net>>8)) & ip_vs_conn_tab_mask;
+			((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
 #endif
 	return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
 			    ip_vs_conn_rnd) ^
-		((size_t)net>>8)) & ip_vs_conn_tab_mask;
+		((size_t)ipvs>>8)) & ip_vs_conn_tab_mask;
 }
 
 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -141,14 +141,14 @@
 		port = p->vport;
 	}
 
-	return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
+	return ip_vs_conn_hashkey(p->ipvs, p->af, p->protocol, addr, port);
 }
 
 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 {
 	struct ip_vs_conn_param p;
 
-	ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+	ip_vs_conn_fill_param(cp->ipvs, cp->af, cp->protocol,
 			      &cp->caddr, cp->cport, NULL, 0, &p);
 
 	if (cp->pe) {
@@ -279,7 +279,7 @@
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
 		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (!__ip_vs_conn_get(cp))
 				continue;
 			/* HIT */
@@ -314,33 +314,34 @@
 }
 
 static int
-ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
+ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs,
+			    int af, const struct sk_buff *skb,
 			    const struct ip_vs_iphdr *iph,
 			    struct ip_vs_conn_param *p)
 {
 	__be16 _ports[2], *pptr;
-	struct net *net = skb_net(skb);
 
 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (pptr == NULL)
 		return 1;
 
 	if (likely(!ip_vs_iph_inverse(iph)))
-		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->saddr,
 				      pptr[0], &iph->daddr, pptr[1], p);
 	else
-		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+		ip_vs_conn_fill_param(ipvs, af, iph->protocol, &iph->daddr,
 				      pptr[1], &iph->saddr, pptr[0], p);
 	return 0;
 }
 
 struct ip_vs_conn *
-ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
+ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
+			const struct sk_buff *skb,
 			const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, &p))
+	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
 		return NULL;
 
 	return ip_vs_conn_in_get(&p);
@@ -359,7 +360,7 @@
 
 	hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (unlikely(p->pe_data && p->pe->ct_match)) {
-			if (!ip_vs_conn_net_eq(cp, p->net))
+			if (cp->ipvs != p->ipvs)
 				continue;
 			if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
 				if (__ip_vs_conn_get(cp))
@@ -377,7 +378,7 @@
 		    p->vport == cp->vport && p->cport == cp->cport &&
 		    cp->flags & IP_VS_CONN_F_TEMPLATE &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (__ip_vs_conn_get(cp))
 				goto out;
 		}
@@ -418,7 +419,7 @@
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
 		    p->protocol == cp->protocol &&
-		    ip_vs_conn_net_eq(cp, p->net)) {
+		    cp->ipvs == p->ipvs) {
 			if (!__ip_vs_conn_get(cp))
 				continue;
 			/* HIT */
@@ -439,12 +440,13 @@
 }
 
 struct ip_vs_conn *
-ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
+ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
+			 const struct sk_buff *skb,
 			 const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn_param p;
 
-	if (ip_vs_conn_fill_param_proto(af, skb, iph, &p))
+	if (ip_vs_conn_fill_param_proto(ipvs, af, skb, iph, &p))
 		return NULL;
 
 	return ip_vs_conn_out_get(&p);
@@ -638,7 +640,7 @@
 	 * so we can make the assumption that the svc_af is the same as the
 	 * dest_af
 	 */
-	dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, cp->af, &cp->daddr,
+	dest = ip_vs_find_dest(cp->ipvs, cp->af, cp->af, &cp->daddr,
 			       cp->dport, &cp->vaddr, cp->vport,
 			       cp->protocol, cp->fwmark, cp->flags);
 	if (dest) {
@@ -668,7 +670,7 @@
 #endif
 			ip_vs_bind_xmit(cp);
 
-		pd = ip_vs_proto_data_get(ip_vs_conn_net(cp), cp->protocol);
+		pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);
 		if (pd && atomic_read(&pd->appcnt))
 			ip_vs_bind_app(cp, pd->pp);
 	}
@@ -746,7 +748,7 @@
 int ip_vs_check_template(struct ip_vs_conn *ct)
 {
 	struct ip_vs_dest *dest = ct->dest;
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
+	struct netns_ipvs *ipvs = ct->ipvs;
 
 	/*
 	 * Checking the dest server status.
@@ -800,8 +802,7 @@
 static void ip_vs_conn_expire(unsigned long data)
 {
 	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
-	struct net *net = ip_vs_conn_net(cp);
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	/*
 	 *	do I control anybody?
@@ -847,7 +848,7 @@
 	cp->timeout = 60*HZ;
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
-		ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
+		ip_vs_sync_conn(ipvs, cp, sysctl_sync_threshold(ipvs));
 
 	ip_vs_conn_put(cp);
 }
@@ -875,8 +876,8 @@
 	       struct ip_vs_dest *dest, __u32 fwmark)
 {
 	struct ip_vs_conn *cp;
-	struct netns_ipvs *ipvs = net_ipvs(p->net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+	struct netns_ipvs *ipvs = p->ipvs;
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,
 							   p->protocol);
 
 	cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
@@ -887,7 +888,7 @@
 
 	INIT_HLIST_NODE(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
-	ip_vs_conn_net_set(cp, p->net);
+	cp->ipvs	   = ipvs;
 	cp->af		   = p->af;
 	cp->daf		   = dest_af;
 	cp->protocol	   = p->protocol;
@@ -1061,7 +1062,7 @@
 		size_t len = 0;
 		char dbuf[IP_VS_ADDRSTRLEN];
 
-		if (!ip_vs_conn_net_eq(cp, net))
+		if (!net_eq(cp->ipvs->net, net))
 			return 0;
 		if (cp->pe_data) {
 			pe_data[0] = ' ';
@@ -1146,7 +1147,7 @@
 		const struct ip_vs_conn *cp = v;
 		struct net *net = seq_file_net(seq);
 
-		if (!ip_vs_conn_net_eq(cp, net))
+		if (!net_eq(cp->ipvs->net, net))
 			return 0;
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1240,7 +1241,7 @@
 }
 
 /* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(struct net *net)
+void ip_vs_random_dropentry(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_conn *cp, *cp_c;
@@ -1256,7 +1257,7 @@
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
-			if (!ip_vs_conn_net_eq(cp, net))
+			if (cp->ipvs != ipvs)
 				continue;
 			if (cp->protocol == IPPROTO_TCP) {
 				switch(cp->state) {
@@ -1308,18 +1309,17 @@
 /*
  *      Flush all the connection entries in the ip_vs_conn_tab
  */
-static void ip_vs_conn_flush(struct net *net)
+static void ip_vs_conn_flush(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_conn *cp, *cp_c;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 flush_again:
 	rcu_read_lock();
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 
 		hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
-			if (!ip_vs_conn_net_eq(cp, net))
+			if (cp->ipvs != ipvs)
 				continue;
 			IP_VS_DBG(4, "del connection\n");
 			ip_vs_conn_expire_now(cp);
@@ -1345,23 +1345,22 @@
 /*
  * per netns init and exit
  */
-int __net_init ip_vs_conn_net_init(struct net *net)
+int __net_init ip_vs_conn_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	atomic_set(&ipvs->conn_count, 0);
 
-	proc_create("ip_vs_conn", 0, net->proc_net, &ip_vs_conn_fops);
-	proc_create("ip_vs_conn_sync", 0, net->proc_net, &ip_vs_conn_sync_fops);
+	proc_create("ip_vs_conn", 0, ipvs->net->proc_net, &ip_vs_conn_fops);
+	proc_create("ip_vs_conn_sync", 0, ipvs->net->proc_net,
+		    &ip_vs_conn_sync_fops);
 	return 0;
 }
 
-void __net_exit ip_vs_conn_net_cleanup(struct net *net)
+void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs)
 {
 	/* flush all the connection entries first */
-	ip_vs_conn_flush(net);
-	remove_proc_entry("ip_vs_conn", net->proc_net);
-	remove_proc_entry("ip_vs_conn_sync", net->proc_net);
+	ip_vs_conn_flush(ipvs);
+	remove_proc_entry("ip_vs_conn", ipvs->net->proc_net);
+	remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net);
 }
 
 int __init ip_vs_conn_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1fa12ed..1e24fff 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -112,7 +112,7 @@
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		struct ip_vs_cpu_stats *s;
@@ -146,7 +146,7 @@
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+	struct netns_ipvs *ipvs = cp->ipvs;
 
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		struct ip_vs_cpu_stats *s;
@@ -179,7 +179,7 @@
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 	struct ip_vs_cpu_stats *s;
 
 	s = this_cpu_ptr(cp->dest->stats.cpustats);
@@ -215,7 +215,7 @@
 			      const union nf_inet_addr *vaddr, __be16 vport,
 			      struct ip_vs_conn_param *p)
 {
-	ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+	ip_vs_conn_fill_param(svc->ipvs, svc->af, protocol, caddr, cport, vaddr,
 			      vport, p);
 	p->pe = rcu_dereference(svc->pe);
 	if (p->pe && p->pe->fill_param)
@@ -376,7 +376,7 @@
 	/*
 	 *    Create a new connection according to the template
 	 */
-	ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, src_addr,
+	ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol, src_addr,
 			      src_port, dst_addr, dst_port, &param);
 
 	cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
@@ -469,7 +469,7 @@
 	 */
 	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK)) {
 		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
-		cp = pp->conn_in_get(svc->af, skb, iph);
+		cp = pp->conn_in_get(svc->ipvs, svc->af, skb, iph);
 		iph->hdr_flags ^= IP_VS_HDR_INVERSE;
 
 		if (cp) {
@@ -524,7 +524,7 @@
 	{
 		struct ip_vs_conn_param p;
 
-		ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
+		ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
 				      caddr, cport, vaddr, vport, &p);
 		cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
 				    dest->port ? dest->port : vport,
@@ -547,7 +547,6 @@
 	return cp;
 }
 
-#ifdef CONFIG_SYSCTL
 static inline int ip_vs_addr_is_unicast(struct net *net, int af,
 					union nf_inet_addr *addr)
 {
@@ -557,7 +556,6 @@
 #endif
 	return (inet_addr_type(net, addr->ip) == RTN_UNICAST);
 }
-#endif
 
 /*
  *  Pass or drop the packet.
@@ -568,25 +566,18 @@
 		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph)
 {
 	__be16 _ports[2], *pptr, dport;
-#ifdef CONFIG_SYSCTL
-	struct net *net;
-	struct netns_ipvs *ipvs;
-#endif
+	struct netns_ipvs *ipvs = svc->ipvs;
+	struct net *net = ipvs->net;
 
 	pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
 	if (!pptr)
 		return NF_DROP;
 	dport = likely(!ip_vs_iph_inverse(iph)) ? pptr[1] : pptr[0];
 
-#ifdef CONFIG_SYSCTL
-	net = skb_net(skb);
-
-
 	/* if it is fwmark-based service, the cache_bypass sysctl is up
 	   and the destination is a non-local unicast, then create
 	   a cache_bypass connection entry */
-	ipvs = net_ipvs(net);
-	if (ipvs->sysctl_cache_bypass && svc->fwmark &&
+	if (sysctl_cache_bypass(ipvs) && svc->fwmark &&
 	    !(iph->hdr_flags & (IP_VS_HDR_INVERSE | IP_VS_HDR_ICMP)) &&
 	    ip_vs_addr_is_unicast(net, svc->af, &iph->daddr)) {
 		int ret;
@@ -600,7 +591,7 @@
 		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
+			ip_vs_conn_fill_param(svc->ipvs, svc->af, iph->protocol,
 					      &iph->saddr, pptr[0],
 					      &iph->daddr, pptr[1], &p);
 			cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
@@ -624,7 +615,6 @@
 		ip_vs_conn_put(cp);
 		return ret;
 	}
-#endif
 
 	/*
 	 * When the virtual ftp service is presented, packets destined
@@ -647,11 +637,8 @@
 	 */
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6) {
-		if (!skb->dev) {
-			struct net *net_ = dev_net(skb_dst(skb)->dev);
-
-			skb->dev = net_->loopback_dev;
-		}
+		if (!skb->dev)
+			skb->dev = net->loopback_dev;
 		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 	} else
 #endif
@@ -662,15 +649,13 @@
 
 #ifdef CONFIG_SYSCTL
 
-static int sysctl_snat_reroute(struct sk_buff *skb)
+static int sysctl_snat_reroute(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
 	return ipvs->sysctl_snat_reroute;
 }
 
-static int sysctl_nat_icmp_send(struct net *net)
+static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	return ipvs->sysctl_nat_icmp_send;
 }
 
@@ -681,8 +666,8 @@
 
 #else
 
-static int sysctl_snat_reroute(struct sk_buff *skb) { return 0; }
-static int sysctl_nat_icmp_send(struct net *net) { return 0; }
+static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; }
+static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; }
 static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; }
 
 #endif
@@ -701,12 +686,13 @@
 	return IP_DEFRAG_VS_OUT;
 }
 
-static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
+static inline int ip_vs_gather_frags(struct netns_ipvs *ipvs,
+				     struct sk_buff *skb, u_int32_t user)
 {
 	int err;
 
 	local_bh_disable();
-	err = ip_defrag(skb, user);
+	err = ip_defrag(ipvs->net, skb, user);
 	local_bh_enable();
 	if (!err)
 		ip_send_check(ip_hdr(skb));
@@ -714,10 +700,10 @@
 	return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
-				 unsigned int hooknum)
+static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
+				 struct sk_buff *skb, unsigned int hooknum)
 {
-	if (!sysctl_snat_reroute(skb))
+	if (!sysctl_snat_reroute(ipvs))
 		return 0;
 	/* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
 	if (NF_INET_LOCAL_IN == hooknum)
@@ -727,12 +713,12 @@
 		struct dst_entry *dst = skb_dst(skb);
 
 		if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
-		    ip6_route_me_harder(skb) != 0)
+		    ip6_route_me_harder(ipvs->net, skb) != 0)
 			return 1;
 	} else
 #endif
 		if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
-		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
+		    ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
 			return 1;
 
 	return 0;
@@ -885,7 +871,7 @@
 #endif
 		ip_vs_nat_icmp(skb, pp, cp, 1);
 
-	if (ip_vs_route_me_harder(af, skb, hooknum))
+	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
 		goto out;
 
 	/* do the statistics and put it back */
@@ -909,8 +895,8 @@
  *	Find any that might be relevant, check against existing connections.
  *	Currently handles error types - unreachable, quench, ttl exceeded.
  */
-static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
-			  unsigned int hooknum)
+static int ip_vs_out_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			  int *related, unsigned int hooknum)
 {
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
@@ -925,7 +911,7 @@
 
 	/* reassemble IP fragments */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
+		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
 			return NF_STOLEN;
 	}
 
@@ -974,7 +960,7 @@
 	ip_vs_fill_iph_skb_icmp(AF_INET, skb, offset, true, &ciph);
 
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET, skb, &ciph);
+	cp = pp->conn_out_get(ipvs, AF_INET, skb, &ciph);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -984,8 +970,9 @@
 }
 
 #ifdef CONFIG_IP_VS_IPV6
-static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
-			     unsigned int hooknum, struct ip_vs_iphdr *ipvsh)
+static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			     int *related, unsigned int hooknum,
+			     struct ip_vs_iphdr *ipvsh)
 {
 	struct icmp6hdr	_icmph, *ic;
 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
@@ -1029,7 +1016,7 @@
 		return NF_ACCEPT;
 
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET6, skb, &ciph);
+	cp = pp->conn_out_get(ipvs, AF_INET6, skb, &ciph);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -1155,7 +1142,7 @@
 	 * if it came from this machine itself.  So re-compute
 	 * the routing information.
 	 */
-	if (ip_vs_route_me_harder(af, skb, hooknum))
+	if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
 		goto drop;
 
 	IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
@@ -1183,9 +1170,8 @@
  *	Check if outgoing packet belongs to the established ip_vs_conn.
  */
 static unsigned int
-ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
 {
-	struct net *net = NULL;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
@@ -1210,8 +1196,7 @@
 	if (unlikely(!skb_dst(skb)))
 		return NF_ACCEPT;
 
-	net = skb_net(skb);
-	if (!net_ipvs(net)->enable)
+	if (!ipvs->enable)
 		return NF_ACCEPT;
 
 	ip_vs_fill_iph_skb(af, skb, false, &iph);
@@ -1219,7 +1204,7 @@
 	if (af == AF_INET6) {
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
-			int verdict = ip_vs_out_icmp_v6(skb, &related,
+			int verdict = ip_vs_out_icmp_v6(ipvs, skb, &related,
 							hooknum, &iph);
 
 			if (related)
@@ -1229,13 +1214,13 @@
 #endif
 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
 			int related;
-			int verdict = ip_vs_out_icmp(skb, &related, hooknum);
+			int verdict = ip_vs_out_icmp(ipvs, skb, &related, hooknum);
 
 			if (related)
 				return verdict;
 		}
 
-	pd = ip_vs_proto_data_get(net, iph.protocol);
+	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
 	if (unlikely(!pd))
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1245,7 +1230,7 @@
 	if (af == AF_INET)
 #endif
 		if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
-			if (ip_vs_gather_frags(skb,
+			if (ip_vs_gather_frags(ipvs, skb,
 					       ip_vs_defrag_user(hooknum)))
 				return NF_STOLEN;
 
@@ -1255,11 +1240,11 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(af, skb, &iph);
+	cp = pp->conn_out_get(ipvs, af, skb, &iph);
 
 	if (likely(cp))
 		return handle_response(af, skb, pd, cp, &iph, hooknum);
-	if (sysctl_nat_icmp_send(net) &&
+	if (sysctl_nat_icmp_send(ipvs) &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
 	     pp->protocol == IPPROTO_SCTP)) {
@@ -1269,7 +1254,7 @@
 					 sizeof(_ports), _ports, &iph);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
-		if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
+		if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr,
 					   pptr[0])) {
 			/*
 			 * Notify the real server: there is no
@@ -1286,7 +1271,7 @@
 #ifdef CONFIG_IP_VS_IPV6
 				if (af == AF_INET6) {
 					if (!skb->dev)
-						skb->dev = net->loopback_dev;
+						skb->dev = ipvs->net->loopback_dev;
 					icmpv6_send(skb,
 						    ICMPV6_DEST_UNREACH,
 						    ICMPV6_PORT_UNREACH,
@@ -1314,7 +1299,7 @@
 ip_vs_reply4(void *priv, struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	return ip_vs_out(state->hook, skb, AF_INET);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 /*
@@ -1325,7 +1310,7 @@
 ip_vs_local_reply4(void *priv, struct sk_buff *skb,
 		   const struct nf_hook_state *state)
 {
-	return ip_vs_out(state->hook, skb, AF_INET);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1339,7 +1324,7 @@
 ip_vs_reply6(void *priv, struct sk_buff *skb,
 	     const struct nf_hook_state *state)
 {
-	return ip_vs_out(state->hook, skb, AF_INET6);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 /*
@@ -1350,13 +1335,14 @@
 ip_vs_local_reply6(void *priv, struct sk_buff *skb,
 		   const struct nf_hook_state *state)
 {
-	return ip_vs_out(state->hook, skb, AF_INET6);
+	return ip_vs_out(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 #endif
 
 static unsigned int
-ip_vs_try_to_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		      struct ip_vs_proto_data *pd,
 		      int *verdict, struct ip_vs_conn **cpp,
 		      struct ip_vs_iphdr *iph)
 {
@@ -1368,7 +1354,7 @@
 		 */
 
 		/* Schedule and create new connection entry into cpp */
-		if (!pp->conn_schedule(af, skb, pd, verdict, cpp, iph))
+		if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
 			return 0;
 	}
 
@@ -1398,9 +1384,9 @@
  *	Currently handles error types - unreachable, quench, ttl exceeded.
  */
 static int
-ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
+ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related,
+	      unsigned int hooknum)
 {
-	struct net *net = NULL;
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
@@ -1415,7 +1401,7 @@
 
 	/* reassemble IP fragments */
 	if (ip_is_fragment(ip_hdr(skb))) {
-		if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
+		if (ip_vs_gather_frags(ipvs, skb, ip_vs_defrag_user(hooknum)))
 			return NF_STOLEN;
 	}
 
@@ -1449,8 +1435,6 @@
 	if (cih == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-	net = skb_net(skb);
-
 	/* Special case for errors for IPIP packets */
 	ipip = false;
 	if (cih->protocol == IPPROTO_IPIP) {
@@ -1466,7 +1450,7 @@
 		ipip = true;
 	}
 
-	pd = ip_vs_proto_data_get(net, cih->protocol);
+	pd = ip_vs_proto_data_get(ipvs, cih->protocol);
 	if (!pd)
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1486,15 +1470,15 @@
 	/* The embedded headers contain source and dest in reverse order.
 	 * For IPIP this is error for request, not for reply.
 	 */
-	cp = pp->conn_in_get(AF_INET, skb, &ciph);
+	cp = pp->conn_in_get(ipvs, AF_INET, skb, &ciph);
 
 	if (!cp) {
 		int v;
 
-		if (!sysctl_schedule_icmp(net_ipvs(net)))
+		if (!sysctl_schedule_icmp(ipvs))
 			return NF_ACCEPT;
 
-		if (!ip_vs_try_to_schedule(AF_INET, skb, pd, &v, &cp, &ciph))
+		if (!ip_vs_try_to_schedule(ipvs, AF_INET, skb, pd, &v, &cp, &ciph))
 			return v;
 		new_cp = true;
 	}
@@ -1528,7 +1512,7 @@
 			skb_reset_network_header(skb);
 			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
 				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-			ipv4_update_pmtu(skb, dev_net(skb->dev),
+			ipv4_update_pmtu(skb, ipvs->net,
 					 mtu, 0, 0, 0, 0);
 			/* Client uses PMTUD? */
 			if (!(frag_off & htons(IP_DF)))
@@ -1583,10 +1567,10 @@
 }
 
 #ifdef CONFIG_IP_VS_IPV6
-static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related,
-			    unsigned int hooknum, struct ip_vs_iphdr *iph)
+static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb,
+			    int *related, unsigned int hooknum,
+			    struct ip_vs_iphdr *iph)
 {
-	struct net *net = NULL;
 	struct icmp6hdr	_icmph, *ic;
 	struct ip_vs_iphdr ciph = {.flags = 0, .fragoffs = 0};/*Contained IP */
 	struct ip_vs_conn *cp;
@@ -1626,8 +1610,7 @@
 	if (!ip_vs_fill_iph_skb_icmp(AF_INET6, skb, offset, true, &ciph))
 		return NF_ACCEPT;
 
-	net = skb_net(skb);
-	pd = ip_vs_proto_data_get(net, ciph.protocol);
+	pd = ip_vs_proto_data_get(ipvs, ciph.protocol);
 	if (!pd)
 		return NF_ACCEPT;
 	pp = pd->pp;
@@ -1642,15 +1625,15 @@
 	/* The embedded headers contain source and dest in reverse order
 	 * if not from localhost
 	 */
-	cp = pp->conn_in_get(AF_INET6, skb, &ciph);
+	cp = pp->conn_in_get(ipvs, AF_INET6, skb, &ciph);
 
 	if (!cp) {
 		int v;
 
-		if (!sysctl_schedule_icmp(net_ipvs(net)))
+		if (!sysctl_schedule_icmp(ipvs))
 			return NF_ACCEPT;
 
-		if (!ip_vs_try_to_schedule(AF_INET6, skb, pd, &v, &cp, &ciph))
+		if (!ip_vs_try_to_schedule(ipvs, AF_INET6, skb, pd, &v, &cp, &ciph))
 			return v;
 
 		new_cp = true;
@@ -1690,15 +1673,13 @@
  *	and send it on its way...
  */
 static unsigned int
-ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int af)
 {
-	struct net *net;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
 	int ret, pkts;
-	struct netns_ipvs *ipvs;
 	int conn_reuse_mode;
 
 	/* Already marked as IPVS request or reply? */
@@ -1721,8 +1702,6 @@
 		return NF_ACCEPT;
 	}
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
@@ -1742,8 +1721,8 @@
 	if (af == AF_INET6) {
 		if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
 			int related;
-			int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
-						       &iph);
+			int verdict = ip_vs_in_icmp_v6(ipvs, skb, &related,
+						       hooknum, &iph);
 
 			if (related)
 				return verdict;
@@ -1752,14 +1731,15 @@
 #endif
 		if (unlikely(iph.protocol == IPPROTO_ICMP)) {
 			int related;
-			int verdict = ip_vs_in_icmp(skb, &related, hooknum);
+			int verdict = ip_vs_in_icmp(ipvs, skb, &related,
+						    hooknum);
 
 			if (related)
 				return verdict;
 		}
 
 	/* Protocol supported? */
-	pd = ip_vs_proto_data_get(net, iph.protocol);
+	pd = ip_vs_proto_data_get(ipvs, iph.protocol);
 	if (unlikely(!pd)) {
 		/* The only way we'll see this packet again is if it's
 		 * encapsulated, so mark it with ipvs_property=1 so we
@@ -1774,7 +1754,7 @@
 	/*
 	 * Check if the packet belongs to an existing connection entry
 	 */
-	cp = pp->conn_in_get(af, skb, &iph);
+	cp = pp->conn_in_get(ipvs, af, skb, &iph);
 
 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
 	if (conn_reuse_mode && !iph.fragoffs &&
@@ -1791,7 +1771,7 @@
 	if (unlikely(!cp)) {
 		int v;
 
-		if (!ip_vs_try_to_schedule(af, skb, pd, &v, &cp, &iph))
+		if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph))
 			return v;
 	}
 
@@ -1836,7 +1816,7 @@
 		pkts = atomic_add_return(1, &cp->in_pkts);
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
-		ip_vs_sync_conn(net, cp, pkts);
+		ip_vs_sync_conn(ipvs, cp, pkts);
 
 	ip_vs_conn_put(cp);
 	return ret;
@@ -1850,7 +1830,7 @@
 ip_vs_remote_request4(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	return ip_vs_in(state->hook, skb, AF_INET);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 /*
@@ -1861,7 +1841,7 @@
 ip_vs_local_request4(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return ip_vs_in(state->hook, skb, AF_INET);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1874,7 +1854,7 @@
 ip_vs_remote_request6(void *priv, struct sk_buff *skb,
 		      const struct nf_hook_state *state)
 {
-	return ip_vs_in(state->hook, skb, AF_INET6);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 /*
@@ -1885,7 +1865,7 @@
 ip_vs_local_request6(void *priv, struct sk_buff *skb,
 		     const struct nf_hook_state *state)
 {
-	return ip_vs_in(state->hook, skb, AF_INET6);
+	return ip_vs_in(net_ipvs(state->net), state->hook, skb, AF_INET6);
 }
 
 #endif
@@ -1905,19 +1885,16 @@
 		   const struct nf_hook_state *state)
 {
 	int r;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = net_ipvs(state->net);
 
 	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp(skb, &r, state->hook);
+	return ip_vs_in_icmp(ipvs, skb, &r, state->hook);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1926,8 +1903,7 @@
 		      const struct nf_hook_state *state)
 {
 	int r;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = net_ipvs(state->net);
 	struct ip_vs_iphdr iphdr;
 
 	ip_vs_fill_iph_skb(AF_INET6, skb, false, &iphdr);
@@ -1935,12 +1911,10 @@
 		return NF_ACCEPT;
 
 	/* ipvs enabled in this netns ? */
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
 		return NF_ACCEPT;
 
-	return ip_vs_in_icmp_v6(skb, &r, state->hook, &iphdr);
+	return ip_vs_in_icmp_v6(ipvs, skb, &r, state->hook, &iphdr);
 }
 #endif
 
@@ -1949,7 +1923,6 @@
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply4,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_NAT_SRC - 2,
@@ -1959,7 +1932,6 @@
 	 * applied to IPVS. */
 	{
 		.hook		= ip_vs_remote_request4,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP_PRI_NAT_SRC - 1,
@@ -1967,7 +1939,6 @@
 	/* Before ip_vs_in, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_local_reply4,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_NAT_DST + 1,
@@ -1975,7 +1946,6 @@
 	/* After mangle, schedule and forward local requests */
 	{
 		.hook		= ip_vs_local_request4,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP_PRI_NAT_DST + 2,
@@ -1984,7 +1954,6 @@
 	 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
 	{
 		.hook		= ip_vs_forward_icmp,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_FORWARD,
 		.priority	= 99,
@@ -1992,7 +1961,6 @@
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply4,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV4,
 		.hooknum	= NF_INET_FORWARD,
 		.priority	= 100,
@@ -2001,7 +1969,6 @@
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_NAT_SRC - 2,
@@ -2011,7 +1978,6 @@
 	 * applied to IPVS. */
 	{
 		.hook		= ip_vs_remote_request6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_IN,
 		.priority	= NF_IP6_PRI_NAT_SRC - 1,
@@ -2019,7 +1985,6 @@
 	/* Before ip_vs_in, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_local_reply6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_NAT_DST + 1,
@@ -2027,7 +1992,6 @@
 	/* After mangle, schedule and forward local requests */
 	{
 		.hook		= ip_vs_local_request6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_LOCAL_OUT,
 		.priority	= NF_IP6_PRI_NAT_DST + 2,
@@ -2036,7 +2000,6 @@
 	 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
 	{
 		.hook		= ip_vs_forward_icmp_v6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_FORWARD,
 		.priority	= 99,
@@ -2044,7 +2007,6 @@
 	/* After packet filtering, change source only for VS/NAT */
 	{
 		.hook		= ip_vs_reply6,
-		.owner		= THIS_MODULE,
 		.pf		= NFPROTO_IPV6,
 		.hooknum	= NF_INET_FORWARD,
 		.priority	= 100,
@@ -2070,22 +2032,22 @@
 	atomic_inc(&ipvs_netns_cnt);
 	net->ipvs = ipvs;
 
-	if (ip_vs_estimator_net_init(net) < 0)
+	if (ip_vs_estimator_net_init(ipvs) < 0)
 		goto estimator_fail;
 
-	if (ip_vs_control_net_init(net) < 0)
+	if (ip_vs_control_net_init(ipvs) < 0)
 		goto control_fail;
 
-	if (ip_vs_protocol_net_init(net) < 0)
+	if (ip_vs_protocol_net_init(ipvs) < 0)
 		goto protocol_fail;
 
-	if (ip_vs_app_net_init(net) < 0)
+	if (ip_vs_app_net_init(ipvs) < 0)
 		goto app_fail;
 
-	if (ip_vs_conn_net_init(net) < 0)
+	if (ip_vs_conn_net_init(ipvs) < 0)
 		goto conn_fail;
 
-	if (ip_vs_sync_net_init(net) < 0)
+	if (ip_vs_sync_net_init(ipvs) < 0)
 		goto sync_fail;
 
 	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
@@ -2096,15 +2058,15 @@
  */
 
 sync_fail:
-	ip_vs_conn_net_cleanup(net);
+	ip_vs_conn_net_cleanup(ipvs);
 conn_fail:
-	ip_vs_app_net_cleanup(net);
+	ip_vs_app_net_cleanup(ipvs);
 app_fail:
-	ip_vs_protocol_net_cleanup(net);
+	ip_vs_protocol_net_cleanup(ipvs);
 protocol_fail:
-	ip_vs_control_net_cleanup(net);
+	ip_vs_control_net_cleanup(ipvs);
 control_fail:
-	ip_vs_estimator_net_cleanup(net);
+	ip_vs_estimator_net_cleanup(ipvs);
 estimator_fail:
 	net->ipvs = NULL;
 	return -ENOMEM;
@@ -2112,22 +2074,25 @@
 
 static void __net_exit __ip_vs_cleanup(struct net *net)
 {
-	ip_vs_service_net_cleanup(net);	/* ip_vs_flush() with locks */
-	ip_vs_conn_net_cleanup(net);
-	ip_vs_app_net_cleanup(net);
-	ip_vs_protocol_net_cleanup(net);
-	ip_vs_control_net_cleanup(net);
-	ip_vs_estimator_net_cleanup(net);
-	IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_service_net_cleanup(ipvs);	/* ip_vs_flush() with locks */
+	ip_vs_conn_net_cleanup(ipvs);
+	ip_vs_app_net_cleanup(ipvs);
+	ip_vs_protocol_net_cleanup(ipvs);
+	ip_vs_control_net_cleanup(ipvs);
+	ip_vs_estimator_net_cleanup(ipvs);
+	IP_VS_DBG(2, "ipvs netns %d released\n", ipvs->gen);
 	net->ipvs = NULL;
 }
 
 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	EnterFunction(2);
-	net_ipvs(net)->enable = 0;	/* Disable packet reception */
+	ipvs->enable = 0;	/* Disable packet reception */
 	smp_wmb();
-	ip_vs_sync_net_cleanup(net);
+	ip_vs_sync_net_cleanup(ipvs);
 	LeaveFunction(2);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7338827..e7c1b05 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -228,7 +228,7 @@
 
 	update_defense_level(ipvs);
 	if (atomic_read(&ipvs->dropentry))
-		ip_vs_random_dropentry(ipvs->net);
+		ip_vs_random_dropentry(ipvs);
 	schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
 }
 #endif
@@ -263,7 +263,7 @@
  *	Returns hash value for virtual service
  */
 static inline unsigned int
-ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
+ip_vs_svc_hashkey(struct netns_ipvs *ipvs, int af, unsigned int proto,
 		  const union nf_inet_addr *addr, __be16 port)
 {
 	register unsigned int porth = ntohs(port);
@@ -276,7 +276,7 @@
 			    addr->ip6[2]^addr->ip6[3];
 #endif
 	ahash = ntohl(addr_fold);
-	ahash ^= ((size_t) net >> 8);
+	ahash ^= ((size_t) ipvs >> 8);
 
 	return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) &
 	       IP_VS_SVC_TAB_MASK;
@@ -285,9 +285,9 @@
 /*
  *	Returns hash value of fwmark for virtual service lookup
  */
-static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
+static inline unsigned int ip_vs_svc_fwm_hashkey(struct netns_ipvs *ipvs, __u32 fwmark)
 {
-	return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
+	return (((size_t)ipvs>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
 
 /*
@@ -309,14 +309,14 @@
 		/*
 		 *  Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
 		 */
-		hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+		hash = ip_vs_svc_hashkey(svc->ipvs, svc->af, svc->protocol,
 					 &svc->addr, svc->port);
 		hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]);
 	} else {
 		/*
 		 *  Hash it by fwmark in svc_fwm_table
 		 */
-		hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
+		hash = ip_vs_svc_fwm_hashkey(svc->ipvs, svc->fwmark);
 		hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
 	}
 
@@ -357,21 +357,21 @@
  *	Get service by {netns, proto,addr,port} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+__ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u16 protocol,
 		     const union nf_inet_addr *vaddr, __be16 vport)
 {
 	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
-	hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
+	hash = ip_vs_svc_hashkey(ipvs, af, protocol, vaddr, vport);
 
 	hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) {
 		if ((svc->af == af)
 		    && ip_vs_addr_equal(af, &svc->addr, vaddr)
 		    && (svc->port == vport)
 		    && (svc->protocol == protocol)
-		    && net_eq(svc->net, net)) {
+		    && (svc->ipvs == ipvs)) {
 			/* HIT */
 			return svc;
 		}
@@ -385,17 +385,17 @@
  *	Get service by {fwmark} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct netns_ipvs *ipvs, int af, __u32 fwmark)
 {
 	unsigned int hash;
 	struct ip_vs_service *svc;
 
 	/* Check for fwmark addressed entries */
-	hash = ip_vs_svc_fwm_hashkey(net, fwmark);
+	hash = ip_vs_svc_fwm_hashkey(ipvs, fwmark);
 
 	hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) {
 		if (svc->fwmark == fwmark && svc->af == af
-		    && net_eq(svc->net, net)) {
+		    && (svc->ipvs == ipvs)) {
 			/* HIT */
 			return svc;
 		}
@@ -406,17 +406,16 @@
 
 /* Find service, called under RCU lock */
 struct ip_vs_service *
-ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
 		   const union nf_inet_addr *vaddr, __be16 vport)
 {
 	struct ip_vs_service *svc;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/*
 	 *	Check the table hashed by fwmark first
 	 */
 	if (fwmark) {
-		svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, af, fwmark);
 		if (svc)
 			goto out;
 	}
@@ -425,7 +424,7 @@
 	 *	Check the table hashed by <protocol,addr,port>
 	 *	for "full" addressed entries
 	 */
-	svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
+	svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
 
 	if (svc == NULL
 	    && protocol == IPPROTO_TCP
@@ -435,7 +434,7 @@
 		 * Check if ftp service entry exists, the packet
 		 * might belong to FTP data connections.
 		 */
-		svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
+		svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, FTPPORT);
 	}
 
 	if (svc == NULL
@@ -443,7 +442,7 @@
 		/*
 		 * Check if the catch-all port (port zero) exists
 		 */
-		svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
+		svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, 0);
 	}
 
   out:
@@ -543,10 +542,9 @@
 }
 
 /* Check if real service by <proto,addr,port> is present */
-bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
 			    const union nf_inet_addr *daddr, __be16 dport)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	unsigned int hash;
 	struct ip_vs_dest *dest;
 
@@ -601,7 +599,7 @@
  * on the backup.
  * Called under RCU lock, no refcnt is returned.
  */
-struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int svc_af, int dest_af,
+struct ip_vs_dest *ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
 				   const union nf_inet_addr *daddr,
 				   __be16 dport,
 				   const union nf_inet_addr *vaddr,
@@ -612,7 +610,7 @@
 	struct ip_vs_service *svc;
 	__be16 port = dport;
 
-	svc = ip_vs_service_find(net, svc_af, fwmark, protocol, vaddr, vport);
+	svc = ip_vs_service_find(ipvs, svc_af, fwmark, protocol, vaddr, vport);
 	if (!svc)
 		return NULL;
 	if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
@@ -660,7 +658,7 @@
 		     const union nf_inet_addr *daddr, __be16 dport)
 {
 	struct ip_vs_dest *dest;
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 
 	/*
 	 * Find the destination in trash
@@ -715,10 +713,9 @@
  *  are expired, and the refcnt of each destination in the trash must
  *  be 0, so we simply release them here.
  */
-static void ip_vs_trash_cleanup(struct net *net)
+static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
 {
 	struct ip_vs_dest *dest, *nxt;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	del_timer_sync(&ipvs->dest_trash_timer);
 	/* No need to use dest_trash_lock */
@@ -788,7 +785,7 @@
 __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 		    struct ip_vs_dest_user_kern *udest, int add)
 {
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 	struct ip_vs_service *old_svc;
 	struct ip_vs_scheduler *sched;
 	int conn_flags;
@@ -843,7 +840,7 @@
 	spin_unlock_bh(&dest->dst_lock);
 
 	if (add) {
-		ip_vs_start_estimator(svc->net, &dest->stats);
+		ip_vs_start_estimator(svc->ipvs, &dest->stats);
 		list_add_rcu(&dest->n_list, &svc->destinations);
 		svc->num_dests++;
 		sched = rcu_dereference_protected(svc->scheduler, 1);
@@ -874,12 +871,12 @@
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&
-			!__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
+			!__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6))
 			return -EINVAL;
 	} else
 #endif
 	{
-		atype = inet_addr_type(svc->net, udest->addr.ip);
+		atype = inet_addr_type(svc->ipvs->net, udest->addr.ip);
 		if (atype != RTN_LOCAL && atype != RTN_UNICAST)
 			return -EINVAL;
 	}
@@ -1036,12 +1033,10 @@
 /*
  *	Delete a destination (must be already unlinked from the service)
  */
-static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
+static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
 			     bool cleanup)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	ip_vs_stop_estimator(net, &dest->stats);
+	ip_vs_stop_estimator(ipvs, &dest->stats);
 
 	/*
 	 *  Remove it from the d-linked list with the real services.
@@ -1079,7 +1074,7 @@
 	svc->num_dests--;
 
 	if (dest->af != svc->af)
-		net_ipvs(svc->net)->mixed_address_family_dests--;
+		svc->ipvs->mixed_address_family_dests--;
 
 	if (svcupd) {
 		struct ip_vs_scheduler *sched;
@@ -1120,7 +1115,7 @@
 	/*
 	 *	Delete the destination
 	 */
-	__ip_vs_del_dest(svc->net, dest, false);
+	__ip_vs_del_dest(svc->ipvs, dest, false);
 
 	LeaveFunction(2);
 
@@ -1129,8 +1124,7 @@
 
 static void ip_vs_dest_trash_expire(unsigned long data)
 {
-	struct net *net = (struct net *) data;
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = (struct netns_ipvs *)data;
 	struct ip_vs_dest *dest, *next;
 	unsigned long now = jiffies;
 
@@ -1163,14 +1157,13 @@
  *	Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
 		  struct ip_vs_service **svc_p)
 {
 	int ret = 0, i;
 	struct ip_vs_scheduler *sched = NULL;
 	struct ip_vs_pe *pe = NULL;
 	struct ip_vs_service *svc = NULL;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -1237,7 +1230,7 @@
 	svc->flags = u->flags;
 	svc->timeout = u->timeout * HZ;
 	svc->netmask = u->netmask;
-	svc->net = net;
+	svc->ipvs = ipvs;
 
 	INIT_LIST_HEAD(&svc->destinations);
 	spin_lock_init(&svc->sched_lock);
@@ -1261,7 +1254,7 @@
 	else if (svc->port == 0)
 		atomic_inc(&ipvs->nullsvc_counter);
 
-	ip_vs_start_estimator(net, &svc->stats);
+	ip_vs_start_estimator(ipvs, &svc->stats);
 
 	/* Count only IPv4 services for old get/setsockopt interface */
 	if (svc->af == AF_INET)
@@ -1381,7 +1374,7 @@
 	struct ip_vs_dest *dest, *nxt;
 	struct ip_vs_scheduler *old_sched;
 	struct ip_vs_pe *old_pe;
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct netns_ipvs *ipvs = svc->ipvs;
 
 	pr_info("%s: enter\n", __func__);
 
@@ -1389,7 +1382,7 @@
 	if (svc->af == AF_INET)
 		ipvs->num_services--;
 
-	ip_vs_stop_estimator(svc->net, &svc->stats);
+	ip_vs_stop_estimator(svc->ipvs, &svc->stats);
 
 	/* Unbind scheduler */
 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
@@ -1405,7 +1398,7 @@
 	 */
 	list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
 		__ip_vs_unlink_dest(svc, dest, 0);
-		__ip_vs_del_dest(svc->net, dest, cleanup);
+		__ip_vs_del_dest(svc->ipvs, dest, cleanup);
 	}
 
 	/*
@@ -1456,7 +1449,7 @@
 /*
  *	Flush all the virtual services
  */
-static int ip_vs_flush(struct net *net, bool cleanup)
+static int ip_vs_flush(struct netns_ipvs *ipvs, bool cleanup)
 {
 	int idx;
 	struct ip_vs_service *svc;
@@ -1468,7 +1461,7 @@
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx],
 					  s_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_unlink_service(svc, cleanup);
 		}
 	}
@@ -1479,7 +1472,7 @@
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx],
 					  f_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_unlink_service(svc, cleanup);
 		}
 	}
@@ -1491,12 +1484,12 @@
  *	Delete service by {netns} in the service table.
  *	Called by __ip_vs_cleanup()
  */
-void ip_vs_service_net_cleanup(struct net *net)
+void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs)
 {
 	EnterFunction(2);
 	/* Check for "full" addressed entries */
 	mutex_lock(&__ip_vs_mutex);
-	ip_vs_flush(net, true);
+	ip_vs_flush(ipvs, true);
 	mutex_unlock(&__ip_vs_mutex);
 	LeaveFunction(2);
 }
@@ -1540,7 +1533,7 @@
 	mutex_lock(&__ip_vs_mutex);
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net)) {
+			if (svc->ipvs == ipvs) {
 				list_for_each_entry(dest, &svc->destinations,
 						    n_list) {
 					ip_vs_forget_dev(dest, dev);
@@ -1549,7 +1542,7 @@
 		}
 
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			if (net_eq(svc->net, net)) {
+			if (svc->ipvs == ipvs) {
 				list_for_each_entry(dest, &svc->destinations,
 						    n_list) {
 					ip_vs_forget_dev(dest, dev);
@@ -1583,26 +1576,26 @@
 	return 0;
 }
 
-static int ip_vs_zero_all(struct net *net)
+static int ip_vs_zero_all(struct netns_ipvs *ipvs)
 {
 	int idx;
 	struct ip_vs_service *svc;
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_zero_service(svc);
 		}
 	}
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			if (net_eq(svc->net, net))
+			if (svc->ipvs == ipvs)
 				ip_vs_zero_service(svc);
 		}
 	}
 
-	ip_vs_zero_stats(&net_ipvs(net)->tot_stats);
+	ip_vs_zero_stats(&ipvs->tot_stats);
 	return 0;
 }
 
@@ -1615,7 +1608,7 @@
 proc_do_defense_mode(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	struct net *net = current->nsproxy->net_ns;
+	struct netns_ipvs *ipvs = table->extra2;
 	int *valp = table->data;
 	int val = *valp;
 	int rc;
@@ -1626,7 +1619,7 @@
 			/* Restore the correct value */
 			*valp = val;
 		} else {
-			update_defense_level(net_ipvs(net));
+			update_defense_level(ipvs);
 		}
 	}
 	return rc;
@@ -1901,6 +1894,7 @@
 static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 {
 	struct net *net = seq_file_net(seq);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_iter *iter = seq->private;
 	int idx;
 	struct ip_vs_service *svc;
@@ -1908,7 +1902,7 @@
 	/* look in hash by protocol */
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) {
-			if (net_eq(svc->net, net) && pos-- == 0) {
+			if ((svc->ipvs == ipvs) && pos-- == 0) {
 				iter->table = ip_vs_svc_table;
 				iter->bucket = idx;
 				return svc;
@@ -1920,7 +1914,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx],
 					 f_list) {
-			if (net_eq(svc->net, net) && pos-- == 0) {
+			if ((svc->ipvs == ipvs) && pos-- == 0) {
 				iter->table = ip_vs_svc_fwm_table;
 				iter->bucket = idx;
 				return svc;
@@ -2208,7 +2202,7 @@
 /*
  *	Set timeout values for tcp tcpfin udp in the timeout_table.
  */
-static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
 {
 #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
 	struct ip_vs_proto_data *pd;
@@ -2221,13 +2215,13 @@
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	if (u->tcp_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
 			= u->tcp_timeout * HZ;
 	}
 
 	if (u->tcp_fin_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 		pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
 			= u->tcp_fin_timeout * HZ;
 	}
@@ -2235,7 +2229,7 @@
 
 #ifdef CONFIG_IP_VS_PROTO_UDP
 	if (u->udp_timeout) {
-		pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+		pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 		pd->timeout_table[IP_VS_UDP_S_NORMAL]
 			= u->udp_timeout * HZ;
 	}
@@ -2356,12 +2350,12 @@
 			cfg.syncid = dm->syncid;
 			rtnl_lock();
 			mutex_lock(&ipvs->sync_mutex);
-			ret = start_sync_thread(net, &cfg, dm->state);
+			ret = start_sync_thread(ipvs, &cfg, dm->state);
 			mutex_unlock(&ipvs->sync_mutex);
 			rtnl_unlock();
 		} else {
 			mutex_lock(&ipvs->sync_mutex);
-			ret = stop_sync_thread(net, dm->state);
+			ret = stop_sync_thread(ipvs, dm->state);
 			mutex_unlock(&ipvs->sync_mutex);
 		}
 		goto out_dec;
@@ -2370,11 +2364,11 @@
 	mutex_lock(&__ip_vs_mutex);
 	if (cmd == IP_VS_SO_SET_FLUSH) {
 		/* Flush the virtual service */
-		ret = ip_vs_flush(net, false);
+		ret = ip_vs_flush(ipvs, false);
 		goto out_unlock;
 	} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
 		/* Set timeout values for (tcp tcpfin udp) */
-		ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
+		ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
 		goto out_unlock;
 	}
 
@@ -2389,7 +2383,7 @@
 	if (cmd == IP_VS_SO_SET_ZERO) {
 		/* if no service address is set, zero counters in all */
 		if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
-			ret = ip_vs_zero_all(net);
+			ret = ip_vs_zero_all(ipvs);
 			goto out_unlock;
 		}
 	}
@@ -2407,10 +2401,10 @@
 	/* Lookup the exact service by <protocol, addr, port> or fwmark */
 	rcu_read_lock();
 	if (usvc.fwmark == 0)
-		svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
+		svc = __ip_vs_service_find(ipvs, usvc.af, usvc.protocol,
 					   &usvc.addr, usvc.port);
 	else
-		svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, usvc.af, usvc.fwmark);
 	rcu_read_unlock();
 
 	if (cmd != IP_VS_SO_SET_ADD
@@ -2424,7 +2418,7 @@
 		if (svc != NULL)
 			ret = -EEXIST;
 		else
-			ret = ip_vs_add_service(net, &usvc, &svc);
+			ret = ip_vs_add_service(ipvs, &usvc, &svc);
 		break;
 	case IP_VS_SO_SET_EDIT:
 		ret = ip_vs_edit_service(svc, &usvc);
@@ -2483,7 +2477,7 @@
 }
 
 static inline int
-__ip_vs_get_service_entries(struct net *net,
+__ip_vs_get_service_entries(struct netns_ipvs *ipvs,
 			    const struct ip_vs_get_services *get,
 			    struct ip_vs_get_services __user *uptr)
 {
@@ -2495,7 +2489,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET || !net_eq(svc->net, net))
+			if (svc->af != AF_INET || (svc->ipvs != ipvs))
 				continue;
 
 			if (count >= get->num_services)
@@ -2514,7 +2508,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET || !net_eq(svc->net, net))
+			if (svc->af != AF_INET || (svc->ipvs != ipvs))
 				continue;
 
 			if (count >= get->num_services)
@@ -2534,7 +2528,7 @@
 }
 
 static inline int
-__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct netns_ipvs *ipvs, const struct ip_vs_get_dests *get,
 			 struct ip_vs_get_dests __user *uptr)
 {
 	struct ip_vs_service *svc;
@@ -2543,9 +2537,9 @@
 
 	rcu_read_lock();
 	if (get->fwmark)
-		svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, get->fwmark);
 	else
-		svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
+		svc = __ip_vs_service_find(ipvs, AF_INET, get->protocol, &addr,
 					   get->port);
 	rcu_read_unlock();
 
@@ -2590,7 +2584,7 @@
 }
 
 static inline void
-__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct netns_ipvs *ipvs, struct ip_vs_timeout_user *u)
 {
 #if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
 	struct ip_vs_proto_data *pd;
@@ -2599,12 +2593,12 @@
 	memset(u, 0, sizeof (*u));
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 	u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
 	u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
-	pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 	u->udp_timeout =
 			pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
 #endif
@@ -2723,7 +2717,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_service_entries(net, get, user);
+		ret = __ip_vs_get_service_entries(ipvs, get, user);
 	}
 	break;
 
@@ -2737,9 +2731,9 @@
 		addr.ip = entry->addr;
 		rcu_read_lock();
 		if (entry->fwmark)
-			svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
+			svc = __ip_vs_svc_fwm_find(ipvs, AF_INET, entry->fwmark);
 		else
-			svc = __ip_vs_service_find(net, AF_INET,
+			svc = __ip_vs_service_find(ipvs, AF_INET,
 						   entry->protocol, &addr,
 						   entry->port);
 		rcu_read_unlock();
@@ -2765,7 +2759,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_dest_entries(net, get, user);
+		ret = __ip_vs_get_dest_entries(ipvs, get, user);
 	}
 	break;
 
@@ -2773,7 +2767,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(net, &t);
+		__ip_vs_get_timeouts(ipvs, &t);
 		if (copy_to_user(user, &t, sizeof(t)) != 0)
 			ret = -EFAULT;
 	}
@@ -3008,12 +3002,13 @@
 	int idx = 0, i;
 	int start = cb->args[0];
 	struct ip_vs_service *svc;
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
-			if (++idx <= start || !net_eq(svc->net, net))
+			if (++idx <= start || (svc->ipvs != ipvs))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -3024,7 +3019,7 @@
 
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
-			if (++idx <= start || !net_eq(svc->net, net))
+			if (++idx <= start || (svc->ipvs != ipvs))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -3040,7 +3035,7 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_parse_service(struct net *net,
+static int ip_vs_genl_parse_service(struct netns_ipvs *ipvs,
 				    struct ip_vs_service_user_kern *usvc,
 				    struct nlattr *nla, int full_entry,
 				    struct ip_vs_service **ret_svc)
@@ -3085,9 +3080,9 @@
 
 	rcu_read_lock();
 	if (usvc->fwmark)
-		svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
+		svc = __ip_vs_svc_fwm_find(ipvs, usvc->af, usvc->fwmark);
 	else
-		svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
+		svc = __ip_vs_service_find(ipvs, usvc->af, usvc->protocol,
 					   &usvc->addr, usvc->port);
 	rcu_read_unlock();
 	*ret_svc = svc;
@@ -3125,14 +3120,14 @@
 	return 0;
 }
 
-static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+static struct ip_vs_service *ip_vs_genl_find_service(struct netns_ipvs *ipvs,
 						     struct nlattr *nla)
 {
 	struct ip_vs_service_user_kern usvc;
 	struct ip_vs_service *svc;
 	int ret;
 
-	ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
+	ret = ip_vs_genl_parse_service(ipvs, &usvc, nla, 0, &svc);
 	return ret ? ERR_PTR(ret) : svc;
 }
 
@@ -3207,7 +3202,8 @@
 	struct ip_vs_service *svc;
 	struct ip_vs_dest *dest;
 	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
 
@@ -3217,7 +3213,7 @@
 		goto out_err;
 
 
-	svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
+	svc = ip_vs_genl_find_service(ipvs, attrs[IPVS_CMD_ATTR_SERVICE]);
 	if (IS_ERR(svc) || svc == NULL)
 		goto out_err;
 
@@ -3353,7 +3349,7 @@
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_sknet(skb);
+	struct net *net = sock_net(skb->sk);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&ipvs->sync_mutex);
@@ -3379,9 +3375,8 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ipvs_sync_daemon_cfg c;
 	struct nlattr *a;
 	int ret;
@@ -3438,33 +3433,32 @@
 
 	rtnl_lock();
 	mutex_lock(&ipvs->sync_mutex);
-	ret = start_sync_thread(net, &c,
+	ret = start_sync_thread(ipvs, &c,
 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 	mutex_unlock(&ipvs->sync_mutex);
 	rtnl_unlock();
 	return ret;
 }
 
-static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	int ret;
 
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
 	mutex_lock(&ipvs->sync_mutex);
-	ret = stop_sync_thread(net,
+	ret = stop_sync_thread(ipvs,
 			       nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 	mutex_unlock(&ipvs->sync_mutex);
 	return ret;
 }
 
-static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct netns_ipvs *ipvs, struct nlattr **attrs)
 {
 	struct ip_vs_timeout_user t;
 
-	__ip_vs_get_timeouts(net, &t);
+	__ip_vs_get_timeouts(ipvs, &t);
 
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
 		t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3476,17 +3470,15 @@
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
 		t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
 
-	return ip_vs_set_timeout(net, &t);
+	return ip_vs_set_timeout(ipvs, &t);
 }
 
 static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
 {
 	int ret = -EINVAL, cmd;
-	struct net *net;
-	struct netns_ipvs *ipvs;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
-	ipvs = net_ipvs(net);
 	cmd = info->genlhdr->cmd;
 
 	if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3499,9 +3491,9 @@
 			goto out;
 
 		if (cmd == IPVS_CMD_NEW_DAEMON)
-			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
+			ret = ip_vs_genl_new_daemon(ipvs, daemon_attrs);
 		else
-			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
+			ret = ip_vs_genl_del_daemon(ipvs, daemon_attrs);
 	}
 
 out:
@@ -3515,22 +3507,22 @@
 	struct ip_vs_dest_user_kern udest;
 	int ret = 0, cmd;
 	int need_full_svc = 0, need_full_dest = 0;
-	struct net *net;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
 	cmd = info->genlhdr->cmd;
 
 	mutex_lock(&__ip_vs_mutex);
 
 	if (cmd == IPVS_CMD_FLUSH) {
-		ret = ip_vs_flush(net, false);
+		ret = ip_vs_flush(ipvs, false);
 		goto out;
 	} else if (cmd == IPVS_CMD_SET_CONFIG) {
-		ret = ip_vs_genl_set_config(net, info->attrs);
+		ret = ip_vs_genl_set_config(ipvs, info->attrs);
 		goto out;
 	} else if (cmd == IPVS_CMD_ZERO &&
 		   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
-		ret = ip_vs_zero_all(net);
+		ret = ip_vs_zero_all(ipvs);
 		goto out;
 	}
 
@@ -3540,7 +3532,7 @@
 	if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
 		need_full_svc = 1;
 
-	ret = ip_vs_genl_parse_service(net, &usvc,
+	ret = ip_vs_genl_parse_service(ipvs, &usvc,
 				       info->attrs[IPVS_CMD_ATTR_SERVICE],
 				       need_full_svc, &svc);
 	if (ret)
@@ -3579,7 +3571,7 @@
 			/* The synchronization protocol is incompatible
 			 * with mixed family services
 			 */
-			if (net_ipvs(net)->sync_state) {
+			if (ipvs->sync_state) {
 				ret = -EINVAL;
 				goto out;
 			}
@@ -3599,7 +3591,7 @@
 	switch (cmd) {
 	case IPVS_CMD_NEW_SERVICE:
 		if (svc == NULL)
-			ret = ip_vs_add_service(net, &usvc, &svc);
+			ret = ip_vs_add_service(ipvs, &usvc, &svc);
 		else
 			ret = -EEXIST;
 		break;
@@ -3637,9 +3629,9 @@
 	struct sk_buff *msg;
 	void *reply;
 	int ret, cmd, reply_cmd;
-	struct net *net;
+	struct net *net = sock_net(skb->sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	net = skb_sknet(skb);
 	cmd = info->genlhdr->cmd;
 
 	if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3668,7 +3660,7 @@
 	{
 		struct ip_vs_service *svc;
 
-		svc = ip_vs_genl_find_service(net,
+		svc = ip_vs_genl_find_service(ipvs,
 					      info->attrs[IPVS_CMD_ATTR_SERVICE]);
 		if (IS_ERR(svc)) {
 			ret = PTR_ERR(svc);
@@ -3689,7 +3681,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(net, &t);
+		__ip_vs_get_timeouts(ipvs, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
 		if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
 				t.tcp_timeout) ||
@@ -3844,10 +3836,10 @@
  * per netns init/exit func.
  */
 #ifdef CONFIG_SYSCTL
-static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
+static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
 {
+	struct net *net = ipvs->net;
 	int idx;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ctl_table *tbl;
 
 	atomic_set(&ipvs->dropentry, 0);
@@ -3866,6 +3858,10 @@
 	} else
 		tbl = vs_vars;
 	/* Initialize sysctl defaults */
+	for (idx = 0; idx < ARRAY_SIZE(vs_vars); idx++) {
+		if (tbl[idx].proc_handler == proc_do_defense_mode)
+			tbl[idx].extra2 = ipvs;
+	}
 	idx = 0;
 	ipvs->sysctl_amemthresh = 1024;
 	tbl[idx++].data = &ipvs->sysctl_amemthresh;
@@ -3916,7 +3912,7 @@
 			kfree(tbl);
 		return -ENOMEM;
 	}
-	ip_vs_start_estimator(net, &ipvs->tot_stats);
+	ip_vs_start_estimator(ipvs, &ipvs->tot_stats);
 	ipvs->sysctl_tbl = tbl;
 	/* Schedule defense work */
 	INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
@@ -3925,14 +3921,14 @@
 	return 0;
 }
 
-static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
 	cancel_delayed_work_sync(&ipvs->defense_work);
 	cancel_work_sync(&ipvs->defense_work.work);
 	unregister_net_sysctl_table(ipvs->sysctl_hdr);
-	ip_vs_stop_estimator(net, &ipvs->tot_stats);
+	ip_vs_stop_estimator(ipvs, &ipvs->tot_stats);
 
 	if (!net_eq(net, &init_net))
 		kfree(ipvs->sysctl_tbl);
@@ -3940,8 +3936,8 @@
 
 #else
 
-static int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
-static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { }
+static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) { return 0; }
+static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { }
 
 #endif
 
@@ -3949,10 +3945,10 @@
 	.notifier_call = ip_vs_dst_event,
 };
 
-int __net_init ip_vs_control_net_init(struct net *net)
+int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 {
+	struct net *net = ipvs->net;
 	int i, idx;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* Initialize rs_table */
 	for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
@@ -3961,7 +3957,7 @@
 	INIT_LIST_HEAD(&ipvs->dest_trash);
 	spin_lock_init(&ipvs->dest_trash_lock);
 	setup_timer(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire,
-		    (unsigned long) net);
+		    (unsigned long) ipvs);
 	atomic_set(&ipvs->ftpsvc_counter, 0);
 	atomic_set(&ipvs->nullsvc_counter, 0);
 
@@ -3983,7 +3979,7 @@
 	proc_create("ip_vs_stats_percpu", 0, net->proc_net,
 		    &ip_vs_stats_percpu_fops);
 
-	if (ip_vs_control_net_init_sysctl(net))
+	if (ip_vs_control_net_init_sysctl(ipvs))
 		goto err;
 
 	return 0;
@@ -3993,12 +3989,12 @@
 	return -ENOMEM;
 }
 
-void __net_exit ip_vs_control_net_cleanup(struct net *net)
+void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net *net = ipvs->net;
 
-	ip_vs_trash_cleanup(net);
-	ip_vs_control_net_cleanup_sysctl(net);
+	ip_vs_trash_cleanup(ipvs);
+	ip_vs_control_net_cleanup_sysctl(ipvs);
 	remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
 	remove_proc_entry("ip_vs_stats", net->proc_net);
 	remove_proc_entry("ip_vs", net->proc_net);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ef0eb0a..457c6c1 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -102,10 +102,8 @@
 	struct ip_vs_estimator *e;
 	struct ip_vs_stats *s;
 	u64 rate;
-	struct net *net = (struct net *)arg;
-	struct netns_ipvs *ipvs;
+	struct netns_ipvs *ipvs = (struct netns_ipvs *)arg;
 
-	ipvs = net_ipvs(net);
 	spin_lock(&ipvs->est_lock);
 	list_for_each_entry(e, &ipvs->est_list, list) {
 		s = container_of(e, struct ip_vs_stats, est);
@@ -140,9 +138,8 @@
 	mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
 }
 
-void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
 	INIT_LIST_HEAD(&est->list);
@@ -152,9 +149,8 @@
 	spin_unlock_bh(&ipvs->est_lock);
 }
 
-void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats)
+void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
 	spin_lock_bh(&ipvs->est_lock);
@@ -192,18 +188,16 @@
 	dst->outbps = (e->outbps + 0xF) >> 5;
 }
 
-int __net_init ip_vs_estimator_net_init(struct net *net)
+int __net_init ip_vs_estimator_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	INIT_LIST_HEAD(&ipvs->est_list);
 	spin_lock_init(&ipvs->est_lock);
-	setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+	setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)ipvs);
 	mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
 	return 0;
 }
 
-void __net_exit ip_vs_estimator_net_cleanup(struct net *net)
+void __net_exit ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs)
 {
-	del_timer_sync(&net_ipvs(net)->est_timer);
+	del_timer_sync(&ipvs->est_timer);
 }
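The estimator conversion relies on the classic timer idiom: the context pointer
travels through setup_timer()'s unsigned long argument and is cast back in the
callback, so no net-to-ipvs translation is needed anymore. A minimal sketch,
assuming the timer API used throughout this tree:

/* Sketch only: context recovery in a timer callback. */
static void example_est_timer(unsigned long arg)
{
	struct netns_ipvs *ipvs = (struct netns_ipvs *)arg;

	/* ... walk ipvs->est_list here ... */
	mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
}

/* paired with:
 *	setup_timer(&ipvs->est_timer, example_est_timer, (unsigned long)ipvs);
 */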
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 5d3daae..d30c327 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -181,7 +181,6 @@
 	int ret = 0;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
-	struct net *net;
 
 	*diff = 0;
 
@@ -223,14 +222,14 @@
 		 */
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+			ip_vs_conn_fill_param(cp->ipvs, AF_INET,
 					      iph->protocol, &from, port,
 					      &cp->caddr, 0, &p);
 			n_cp = ip_vs_conn_out_get(&p);
 		}
 		if (!n_cp) {
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+			ip_vs_conn_fill_param(cp->ipvs,
 					      AF_INET, IPPROTO_TCP, &cp->caddr,
 					      0, &cp->vaddr, port, &p);
 			/* As above, this is ipv4 only */
@@ -289,9 +288,8 @@
 		 * would be adjusted twice.
 		 */
 
-		net = skb_net(skb);
 		cp->app_data = NULL;
-		ip_vs_tcp_conn_listen(net, n_cp);
+		ip_vs_tcp_conn_listen(n_cp);
 		ip_vs_conn_put(n_cp);
 		return ret;
 	}
@@ -320,7 +318,6 @@
 	union nf_inet_addr to;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
-	struct net *net;
 
 	/* no diff required for incoming packets */
 	*diff = 0;
@@ -392,7 +389,7 @@
 
 	{
 		struct ip_vs_conn_param p;
-		ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+		ip_vs_conn_fill_param(cp->ipvs, AF_INET,
 				      iph->protocol, &to, port, &cp->vaddr,
 				      htons(ntohs(cp->vport)-1), &p);
 		n_cp = ip_vs_conn_in_get(&p);
@@ -413,8 +410,7 @@
 	/*
 	 *	Move tunnel to listen state
 	 */
-	net = skb_net(skb);
-	ip_vs_tcp_conn_listen(net, n_cp);
+	ip_vs_tcp_conn_listen(n_cp);
 	ip_vs_conn_put(n_cp);
 
 	return 1;
@@ -447,14 +443,14 @@
 	if (!ipvs)
 		return -ENOENT;
 
-	app = register_ip_vs_app(net, &ip_vs_ftp);
+	app = register_ip_vs_app(ipvs, &ip_vs_ftp);
 	if (IS_ERR(app))
 		return PTR_ERR(app);
 
 	for (i = 0; i < ports_count; i++) {
 		if (!ports[i])
 			continue;
-		ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
+		ret = register_ip_vs_app_inc(ipvs, app, app->protocol, ports[i]);
 		if (ret)
 			goto err_unreg;
 		pr_info("%s: loaded support on port[%d] = %d\n",
@@ -463,7 +459,7 @@
 	return 0;
 
 err_unreg:
-	unregister_ip_vs_app(net, &ip_vs_ftp);
+	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
 	return ret;
 }
 /*
@@ -471,7 +467,12 @@
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-	unregister_ip_vs_app(net, &ip_vs_ftp);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!ipvs)
+		return;
+
+	unregister_ip_vs_app(ipvs, &ip_vs_ftp);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
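__ip_vs_ftp_exit() now tolerates a NULL net_ipvs(), which can happen when the
core IPVS pernet init failed for that namespace; any module hanging its own
pernet_operations off IPVS wants the same guard. A sketch:

/* Sketch only: defensive pernet exit for an IPVS helper module. */
static void example_pernet_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!ipvs)	/* core IPVS never initialized this netns */
		return;
	/* ... unregister per-netns state here ... */
}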
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 127f140..cccf4d6 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -250,8 +250,7 @@
 static int sysctl_lblc_expiration(struct ip_vs_service *svc)
 {
 #ifdef CONFIG_SYSCTL
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
-	return ipvs->sysctl_lblc_expiration;
+	return svc->ipvs->sysctl_lblc_expiration;
 #else
 	return DEFAULT_EXPIRATION;
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 2229d2d..796d70e 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -415,8 +415,7 @@
 static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
 {
 #ifdef CONFIG_SYSCTL
-	struct netns_ipvs *ipvs = net_ipvs(svc->net);
-	return ipvs->sysctl_lblcr_expiration;
+	return svc->ipvs->sysctl_lblcr_expiration;
 #else
 	return DEFAULT_EXPIRATION;
 #endif
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 1361845..30434fb 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -161,7 +161,7 @@
 
 	/* RS->CLIENT */
 	orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-	ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
+	ip_vs_conn_fill_param(net_ipvs(net), exp->tuple.src.l3num, orig->dst.protonum,
 			      &orig->src.u3, orig->src.u.tcp.port,
 			      &orig->dst.u3, orig->dst.u.tcp.port, &p);
 	cp = ip_vs_conn_out_get(&p);
@@ -274,8 +274,7 @@
 		" for conn " FMT_CONN "\n",
 		__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-	h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
-				  &tuple);
+	h = nf_conntrack_find_get(cp->ipvs->net, &nf_ct_zone_dflt, &tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		/* Show what happens instead of calling nf_ct_kill() */
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 939f7fb..8ae4807 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -63,9 +63,8 @@
  *	register an ipvs protocols netns related data
  */
 static int
-register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
 	struct ip_vs_proto_data *pd =
 			kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);
@@ -79,7 +78,7 @@
 	atomic_set(&pd->appcnt, 0);	/* Init app counter */
 
 	if (pp->init_netns != NULL) {
-		int ret = pp->init_netns(net, pd);
+		int ret = pp->init_netns(ipvs, pd);
 		if (ret) {
 			/* unlink and free proto data */
 			ipvs->proto_data_table[hash] = pd->next;
@@ -116,9 +115,8 @@
  *	unregister an ipvs protocols netns data
  */
 static int
-unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data **pd_p;
 	unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
 
@@ -127,7 +125,7 @@
 		if (*pd_p == pd) {
 			*pd_p = pd->next;
 			if (pd->pp->exit_netns != NULL)
-				pd->pp->exit_netns(net, pd);
+				pd->pp->exit_netns(ipvs, pd);
 			kfree(pd);
 			return 0;
 		}
@@ -156,8 +154,8 @@
 /*
  *	get ip_vs_protocol object data by netns and proto
  */
-static struct ip_vs_proto_data *
-__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
 {
 	struct ip_vs_proto_data *pd;
 	unsigned int hash = IP_VS_PROTO_HASH(proto);
@@ -169,14 +167,6 @@
 
 	return NULL;
 }
-
-struct ip_vs_proto_data *
-ip_vs_proto_data_get(struct net *net, unsigned short proto)
-{
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
-	return __ipvs_proto_data_get(ipvs, proto);
-}
 EXPORT_SYMBOL(ip_vs_proto_data_get);
 
 /*
@@ -317,7 +307,7 @@
 /*
  * per network namespace init
  */
-int __net_init ip_vs_protocol_net_init(struct net *net)
+int __net_init ip_vs_protocol_net_init(struct netns_ipvs *ipvs)
 {
 	int i, ret;
 	static struct ip_vs_protocol *protos[] = {
@@ -339,27 +329,26 @@
 	};
 
 	for (i = 0; i < ARRAY_SIZE(protos); i++) {
-		ret = register_ip_vs_proto_netns(net, protos[i]);
+		ret = register_ip_vs_proto_netns(ipvs, protos[i]);
 		if (ret < 0)
 			goto cleanup;
 	}
 	return 0;
 
 cleanup:
-	ip_vs_protocol_net_cleanup(net);
+	ip_vs_protocol_net_cleanup(ipvs);
 	return ret;
 }
 
-void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
+void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_proto_data *pd;
 	int i;
 
 	/* unregister all the ipvs proto data for this netns */
 	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
 		while ((pd = ipvs->proto_data_table[i]) != NULL)
-			unregister_ip_vs_proto_netns(net, pd);
+			unregister_ip_vs_proto_netns(ipvs, pd);
 	}
 }
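With the __ipvs_proto_data_get() wrapper folded into ip_vs_proto_data_get(),
callers hold the ipvs pointer and index the per-netns proto table directly. A
sketch of the resulting call pattern (the caller name is hypothetical):

/* Sketch only; example_set_tcp_timeout is hypothetical. */
static void example_set_tcp_timeout(struct netns_ipvs *ipvs, int secs)
{
	struct ip_vs_proto_data *pd;

	pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
	if (pd)
		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] = secs * HZ;
}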
 
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index be1791d..5320d39 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,29 +41,28 @@
 #define PORT_ISAKMP	500
 
 static void
-ah_esp_conn_fill_param_proto(struct net *net, int af,
+ah_esp_conn_fill_param_proto(struct netns_ipvs *ipvs, int af,
 			     const struct ip_vs_iphdr *iph,
 			     struct ip_vs_conn_param *p)
 {
 	if (likely(!ip_vs_iph_inverse(iph)))
-		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
+		ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
 				      &iph->saddr, htons(PORT_ISAKMP),
 				      &iph->daddr, htons(PORT_ISAKMP), p);
 	else
-		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
+		ip_vs_conn_fill_param(ipvs, af, IPPROTO_UDP,
 				      &iph->daddr, htons(PORT_ISAKMP),
 				      &iph->saddr, htons(PORT_ISAKMP), p);
 }
 
 static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb,
+ah_esp_conn_in_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
 		   const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
-	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(net, af, iph, &p);
+	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
 	cp = ip_vs_conn_in_get(&p);
 	if (!cp) {
 		/*
@@ -83,14 +82,13 @@
 
 
 static struct ip_vs_conn *
-ah_esp_conn_out_get(int af, const struct sk_buff *skb,
+ah_esp_conn_out_get(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb,
 		    const struct ip_vs_iphdr *iph)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
-	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(net, af, iph, &p);
+	ah_esp_conn_fill_param_proto(ipvs, af, iph, &p);
 	cp = ip_vs_conn_out_get(&p);
 	if (!cp) {
 		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
@@ -106,7 +104,8 @@
 
 
 static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+ah_esp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		     struct ip_vs_proto_data *pd,
 		     int *verdict, struct ip_vs_conn **cpp,
 		     struct ip_vs_iphdr *iph)
 {
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 2026fca..010ddee 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,13 +9,12 @@
 #include <net/ip_vs.h>
 
 static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		   struct ip_vs_proto_data *pd,
 		   int *verdict, struct ip_vs_conn **cpp,
 		   struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
-	struct netns_ipvs *ipvs;
 	sctp_chunkhdr_t _schunkh, *sch;
 	sctp_sctphdr_t *sh, _sctph;
 	__be16 _ports[2], *ports = NULL;
@@ -40,14 +39,12 @@
 		return 0;
 	}
 
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
 	rcu_read_lock();
 	if (likely(!ip_vs_iph_inverse(iph)))
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->daddr, ports[1]);
 	else
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->saddr, ports[0]);
 	if (svc) {
 		int ignored;
@@ -486,14 +483,13 @@
 		& SCTP_APP_TAB_MASK;
 }
 
-static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
+static int sctp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
 
 	hash = sctp_app_hashkey(port);
 
@@ -510,9 +506,9 @@
 	return ret;
 }
 
-static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
+static void sctp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_SCTP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -520,7 +516,7 @@
 
 static int sctp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -561,10 +557,8 @@
  *   timeouts are netns related now.
  * ---------------------------------------------
  */
-static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_sctp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
 							sizeof(sctp_timeouts));
@@ -573,7 +567,7 @@
 	return 0;
 }
 
-static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __ip_vs_sctp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 8f43cf6..d7024b2 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -32,19 +32,15 @@
 #include <net/ip_vs.h>
 
 static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		  struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp,
 		  struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
 	struct tcphdr _tcph, *th;
-	struct netns_ipvs *ipvs;
 	__be16 _ports[2], *ports = NULL;
 
-	net = skb_net(skb);
-	ipvs = net_ipvs(net);
-
 	/* In the event of icmp, we're only guaranteed to have the first 8
 	 * bytes of the transport header, so we only check the rest of the
 	 * TCP packet for non-ICMP packets
@@ -70,10 +66,10 @@
 	rcu_read_lock();
 
 	if (likely(!ip_vs_iph_inverse(iph)))
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->daddr, ports[1]);
 	else
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->saddr, ports[0]);
 
 	if (svc) {
@@ -595,14 +591,13 @@
 }
 
 
-static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
+static int tcp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 
 	hash = tcp_app_hashkey(port);
 
@@ -621,9 +616,9 @@
 
 
 static void
-tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
+tcp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -633,7 +628,7 @@
 static int
 tcp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -677,9 +672,9 @@
 /*
  *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
  */
-void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(cp->ipvs, IPPROTO_TCP);
 
 	spin_lock_bh(&cp->lock);
 	cp->state = IP_VS_TCP_S_LISTEN;
@@ -692,10 +687,8 @@
  *   timeouts are netns related now.
  * ---------------------------------------------
  */
-static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __ip_vs_tcp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
 							sizeof(tcp_timeouts));
@@ -705,7 +698,7 @@
 	return 0;
 }
 
-static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __ip_vs_tcp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f3aa821..e494e9a 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -29,11 +29,11 @@
 #include <net/ip6_checksum.h>
 
 static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
+udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb,
+		  struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp,
 		  struct ip_vs_iphdr *iph)
 {
-	struct net *net;
 	struct ip_vs_service *svc;
 	struct udphdr _udph, *uh;
 	__be16 _ports[2], *ports = NULL;
@@ -53,19 +53,18 @@
 		return 0;
 	}
 
-	net = skb_net(skb);
 	rcu_read_lock();
 	if (likely(!ip_vs_iph_inverse(iph)))
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->daddr, ports[1]);
 	else
-		svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
+		svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol,
 					 &iph->saddr, ports[0]);
 
 	if (svc) {
 		int ignored;
 
-		if (ip_vs_todrop(net_ipvs(net))) {
+		if (ip_vs_todrop(ipvs)) {
 			/*
 			 * It seems that we are very loaded.
 			 * We have to drop this packet :(
@@ -363,14 +362,13 @@
 }
 
 
-static int udp_register_app(struct net *net, struct ip_vs_app *inc)
+static int udp_register_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 
 	hash = udp_app_hashkey(port);
 
@@ -389,9 +387,9 @@
 
 
 static void
-udp_unregister_app(struct net *net, struct ip_vs_app *inc)
+udp_unregister_app(struct netns_ipvs *ipvs, struct ip_vs_app *inc)
 {
-	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);
 
 	atomic_dec(&pd->appcnt);
 	list_del_rcu(&inc->p_list);
@@ -400,7 +398,7 @@
 
 static int udp_app_conn_bind(struct ip_vs_conn *cp)
 {
-	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
+	struct netns_ipvs *ipvs = cp->ipvs;
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -471,10 +469,8 @@
 	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 }
 
-static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
+static int __udp_init(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
 	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
 							sizeof(udp_timeouts));
@@ -483,7 +479,7 @@
 	return 0;
 }
 
-static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
+static void __udp_exit(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd)
 {
 	kfree(pd->timeout_table);
 }
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 43f1409..803001a 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -193,7 +193,7 @@
 #define IPVS_OPT_F_PARAM	(1 << (IPVS_OPT_PARAM-1))
 
 struct ip_vs_sync_thread_data {
-	struct net *net;
+	struct netns_ipvs *ipvs;
 	struct socket *sock;
 	char *buf;
 	int id;
@@ -533,10 +533,9 @@
  *      Version 0, can be switched in by sysctl.
  *      Add an ip_vs_conn information into the current sync_buff.
  */
-static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp,
 			       int pkts)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg_v0 *m;
 	struct ip_vs_sync_conn_v0 *s;
 	struct ip_vs_sync_buff *buff;
@@ -615,7 +614,7 @@
 			pkts = atomic_add_return(1, &cp->in_pkts);
 		else
 			pkts = sysctl_sync_threshold(ipvs);
-		ip_vs_sync_conn(net, cp, pkts);
+		ip_vs_sync_conn(ipvs, cp, pkts);
 	}
 }
 
@@ -624,9 +623,8 @@
  *      Called by ip_vs_in.
  *      Sending Version 1 messages
  */
-void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts)
+void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg *m;
 	union ip_vs_sync_conn *s;
 	struct ip_vs_sync_buff *buff;
@@ -637,7 +635,7 @@
 
 	/* Handle old version of the protocol */
 	if (sysctl_sync_ver(ipvs) == 0) {
-		ip_vs_sync_conn_v0(net, cp, pkts);
+		ip_vs_sync_conn_v0(ipvs, cp, pkts);
 		return;
 	}
 	/* Do not sync ONE PACKET */
@@ -784,21 +782,21 @@
  *  fill_param used by version 1
  */
 static inline int
-ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+ip_vs_conn_fill_param_sync(struct netns_ipvs *ipvs, int af, union ip_vs_sync_conn *sc,
 			   struct ip_vs_conn_param *p,
 			   __u8 *pe_data, unsigned int pe_data_len,
 			   __u8 *pe_name, unsigned int pe_name_len)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
-		ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+		ip_vs_conn_fill_param(ipvs, af, sc->v6.protocol,
 				      (const union nf_inet_addr *)&sc->v6.caddr,
 				      sc->v6.cport,
 				      (const union nf_inet_addr *)&sc->v6.vaddr,
 				      sc->v6.vport, p);
 	else
 #endif
-		ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+		ip_vs_conn_fill_param(ipvs, af, sc->v4.protocol,
 				      (const union nf_inet_addr *)&sc->v4.caddr,
 				      sc->v4.cport,
 				      (const union nf_inet_addr *)&sc->v4.vaddr,
@@ -837,7 +835,7 @@
  *  Param: ...
  *         timeout is in sec.
  */
-static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *param,
 			    unsigned int flags, unsigned int state,
 			    unsigned int protocol, unsigned int type,
 			    const union nf_inet_addr *daddr, __be16 dport,
@@ -846,7 +844,6 @@
 {
 	struct ip_vs_dest *dest;
 	struct ip_vs_conn *cp;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
 		cp = ip_vs_conn_in_get(param);
@@ -904,7 +901,7 @@
 		 * with synchronization, so we can make the assumption that
 		 * the svc_af is the same as the dest_af
 		 */
-		dest = ip_vs_find_dest(net, type, type, daddr, dport,
+		dest = ip_vs_find_dest(ipvs, type, type, daddr, dport,
 				       param->vaddr, param->vport, protocol,
 				       fwmark, flags);
 
@@ -941,7 +938,7 @@
 	} else {
 		struct ip_vs_proto_data *pd;
 
-		pd = ip_vs_proto_data_get(net, protocol);
+		pd = ip_vs_proto_data_get(ipvs, protocol);
 		if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
 			cp->timeout = pd->timeout_table[state];
 		else
@@ -953,7 +950,7 @@
 /*
  *  Process received multicast message for Version 0
  */
-static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer,
 				     const size_t buflen)
 {
 	struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
@@ -1009,14 +1006,14 @@
 			}
 		}
 
-		ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+		ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol,
 				      (const union nf_inet_addr *)&s->caddr,
 				      s->cport,
 				      (const union nf_inet_addr *)&s->vaddr,
 				      s->vport, &param);
 
 		/* Send timeout as Zero */
-		ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->protocol, AF_INET,
 				(union nf_inet_addr *)&s->daddr, s->dport,
 				0, 0, opt);
 	}
@@ -1067,7 +1064,7 @@
 /*
  *   Process a Version 1 sync. connection
  */
-static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *msg_end)
 {
 	struct ip_vs_sync_conn_options opt;
 	union  ip_vs_sync_conn *s;
@@ -1171,21 +1168,21 @@
 			state = 0;
 		}
 	}
-	if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+	if (ip_vs_conn_fill_param_sync(ipvs, af, s, &param, pe_data,
 				       pe_data_len, pe_name, pe_name_len)) {
 		retc = 50;
 		goto out;
 	}
 	/* If only IPv4, just silently skip IPv6 */
 	if (af == AF_INET)
-		ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->v4.protocol, af,
 				(union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
 				ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
 				);
 #ifdef CONFIG_IP_VS_IPV6
 	else
-		ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+		ip_vs_proc_conn(ipvs, &param, flags, state, s->v6.protocol, af,
 				(union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
 				ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
 				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
@@ -1204,10 +1201,9 @@
  *      ip_vs_conn entries.
  *      Handles Version 0 & 1
  */
-static void ip_vs_process_message(struct net *net, __u8 *buffer,
+static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
 				  const size_t buflen)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
 	__u8 *p, *msg_end;
 	int i, nr_conns;
@@ -1257,7 +1253,7 @@
 				return;
 			}
 			/* Process a single sync_conn */
-			retc = ip_vs_proc_sync_conn(net, p, msg_end);
+			retc = ip_vs_proc_sync_conn(ipvs, p, msg_end);
 			if (retc < 0) {
 				IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
 					     retc);
@@ -1268,7 +1264,7 @@
 		}
 	} else {
 		/* Old type of message */
-		ip_vs_process_message_v0(net, buffer, buflen);
+		ip_vs_process_message_v0(ipvs, buffer, buflen);
 		return;
 	}
 }
@@ -1493,16 +1489,15 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct net *net, int id)
+static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
 	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
+	result = sock_create_kern(ipvs->net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
@@ -1550,16 +1545,15 @@
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct net *net, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
 	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
 	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
+	result = sock_create_kern(ipvs->net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
 				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
@@ -1687,7 +1681,7 @@
 static int sync_thread_master(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
-	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+	struct netns_ipvs *ipvs = tinfo->ipvs;
 	struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id];
 	struct sock *sk = tinfo->sock->sk;
 	struct ip_vs_sync_buff *sb;
@@ -1743,7 +1737,7 @@
 static int sync_thread_backup(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
-	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
+	struct netns_ipvs *ipvs = tinfo->ipvs;
 	int len;
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1765,7 +1759,7 @@
 				break;
 			}
 
-			ip_vs_process_message(tinfo->net, tinfo->buf, len);
+			ip_vs_process_message(ipvs, tinfo->buf, len);
 		}
 	}
 
@@ -1778,13 +1772,12 @@
 }
 
 
-int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *c,
+int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
 		      int state)
 {
 	struct ip_vs_sync_thread_data *tinfo;
 	struct task_struct **array = NULL, *task;
 	struct socket *sock;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
@@ -1811,7 +1804,7 @@
 	if (!c->mcast_ttl)
 		c->mcast_ttl = 1;
 
-	dev = __dev_get_by_name(net, c->mcast_ifn);
+	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
 	if (!dev) {
 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
 		return -ENODEV;
@@ -1873,9 +1866,9 @@
 	tinfo = NULL;
 	for (id = 0; id < count; id++) {
 		if (state == IP_VS_STATE_MASTER)
-			sock = make_send_sock(net, id);
+			sock = make_send_sock(ipvs, id);
 		else
-			sock = make_receive_sock(net, id);
+			sock = make_receive_sock(ipvs, id);
 		if (IS_ERR(sock)) {
 			result = PTR_ERR(sock);
 			goto outtinfo;
@@ -1883,7 +1876,7 @@
 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
 		if (!tinfo)
 			goto outsocket;
-		tinfo->net = net;
+		tinfo->ipvs = ipvs;
 		tinfo->sock = sock;
 		if (state == IP_VS_STATE_BACKUP) {
 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
@@ -1947,9 +1940,8 @@
 }
 
 
-int stop_sync_thread(struct net *net, int state)
+int stop_sync_thread(struct netns_ipvs *ipvs, int state)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct task_struct **array;
 	int id;
 	int retc = -EINVAL;
@@ -2015,27 +2007,24 @@
 /*
  * Initialize data struct for each netns
  */
-int __net_init ip_vs_sync_net_init(struct net *net)
+int __net_init ip_vs_sync_net_init(struct netns_ipvs *ipvs)
 {
-	struct netns_ipvs *ipvs = net_ipvs(net);
-
 	__mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key);
 	spin_lock_init(&ipvs->sync_lock);
 	spin_lock_init(&ipvs->sync_buff_lock);
 	return 0;
 }
 
-void ip_vs_sync_net_cleanup(struct net *net)
+void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
 {
 	int retc;
-	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&ipvs->sync_mutex);
-	retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
+	retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Master Daemon\n");
 
-	retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
+	retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
 	if (retc && retc != -ESRCH)
 		pr_err("Failed to stop Backup Daemon\n");
 	mutex_unlock(&ipvs->sync_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 9dbb7cc..3264cb49 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -212,13 +212,14 @@
 		ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
 }
 
-static inline bool ensure_mtu_is_adequate(int skb_af, int rt_mode,
+static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
+					  int rt_mode,
 					  struct ip_vs_iphdr *ipvsh,
 					  struct sk_buff *skb, int mtu)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (skb_af == AF_INET6) {
-		struct net *net = dev_net(skb_dst(skb)->dev);
+		struct net *net = ipvs->net;
 
 		if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
 			if (!skb->dev)
@@ -233,8 +234,6 @@
 	} else
 #endif
 	{
-		struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
-
 		/* If we're going to tunnel the packet and pmtu discovery
 		 * is disabled, we'll just fragment it anyway
 		 */
@@ -257,11 +256,12 @@
 
 /* Get route to destination or remote server */
 static int
-__ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
+__ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+		   struct ip_vs_dest *dest,
 		   __be32 daddr, int rt_mode, __be32 *ret_saddr,
 		   struct ip_vs_iphdr *ipvsh)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = ipvs->net;
 	struct ip_vs_dest_dst *dest_dst;
 	struct rtable *rt;			/* Route to the other host */
 	int mtu;
@@ -337,7 +337,7 @@
 		maybe_update_pmtu(skb_af, skb, mtu);
 	}
 
-	if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
+	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
 		goto err_put;
 
 	skb_dst_drop(skb);
@@ -403,11 +403,12 @@
  * Get route to destination or remote server
  */
 static int
-__ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
+__ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+		      struct ip_vs_dest *dest,
 		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
 		      struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
 {
-	struct net *net = dev_net(skb_dst(skb)->dev);
+	struct net *net = ipvs->net;
 	struct ip_vs_dest_dst *dest_dst;
 	struct rt6_info *rt;			/* Route to the other host */
 	struct dst_entry *dst;
@@ -485,7 +486,7 @@
 		maybe_update_pmtu(skb_af, skb, mtu);
 	}
 
-	if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
+	if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
 		goto err_put;
 
 	skb_dst_drop(skb);
@@ -574,8 +575,8 @@
 		skb_forward_csum(skb);
 		if (!skb->sk)
 			skb_sender_cpu_clear(skb);
-		NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
-			NULL, skb_dst(skb)->dev, dst_output_okfn);
+		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+			NULL, skb_dst(skb)->dev, dst_output);
 	} else
 		ret = NF_ACCEPT;
 
@@ -596,8 +597,8 @@
 		skb_forward_csum(skb);
 		if (!skb->sk)
 			skb_sender_cpu_clear(skb);
-		NF_HOOK(pf, NF_INET_LOCAL_OUT, ip_vs_conn_net(cp), NULL, skb,
-			NULL, skb_dst(skb)->dev, dst_output_okfn);
+		NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
+			NULL, skb_dst(skb)->dev, dst_output);
 	} else
 		ret = NF_ACCEPT;
 	return ret;
@@ -630,7 +631,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr,
+	if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
 			       IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
 		goto tx_error;
 
@@ -662,7 +663,8 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &iph->daddr, NULL,
+	if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
+				  &iph->daddr, NULL,
 				  ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
 		goto tx_error;
 
@@ -709,7 +711,7 @@
 	}
 
 	was_input = rt_is_input_route(skb_rtable(skb));
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_RDR, NULL, ipvsh);
@@ -798,7 +800,8 @@
 		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
 	}
 
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      NULL, ipvsh, 0,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -971,8 +974,8 @@
 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
 {
-	struct net *net = skb_net(skb);
-	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct netns_ipvs *ipvs = cp->ipvs;
+	struct net *net = ipvs->net;
 	struct rtable *rt;			/* Route to the other host */
 	__be32 saddr;				/* Source for tunnel */
 	struct net_device *tdev;		/* Device to other host */
@@ -988,7 +991,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_CONNECT |
@@ -1046,7 +1049,7 @@
 
 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
 	if (ret == NF_ACCEPT)
-		ip_local_out(skb);
+		ip_local_out(net, skb->sk, skb);
 	else if (ret == NF_DROP)
 		kfree_skb(skb);
 	rcu_read_unlock();
@@ -1082,7 +1085,8 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      &saddr, ipvsh, 1,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -1137,7 +1141,7 @@
 
 	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
 	if (ret == NF_ACCEPT)
-		ip6_local_out(skb);
+		ip6_local_out(cp->ipvs->net, skb->sk, skb);
 	else if (ret == NF_DROP)
 		kfree_skb(skb);
 	rcu_read_unlock();
@@ -1169,7 +1173,7 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
 				   IP_VS_RT_MODE_LOCAL |
 				   IP_VS_RT_MODE_NON_LOCAL |
 				   IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
@@ -1208,7 +1212,8 @@
 	EnterFunction(10);
 
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6,
 				      NULL, ipvsh, 0,
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
@@ -1277,7 +1282,7 @@
 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
+	local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
 				   NULL, iph);
 	if (local < 0)
 		goto tx_error;
@@ -1369,8 +1374,8 @@
 		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
 		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	rcu_read_lock();
-	local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
-				      NULL, ipvsh, 0, rt_mode);
+	local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
+				      &cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
 	if (local < 0)
 		goto tx_error;
 	rt = (struct rt6_info *) skb_dst(skb);
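Note that the transmit paths above also adopt the new ip_local_out() and
ip6_local_out() signatures, which take the netns and socket explicitly; with
skb_net() gone, the namespace comes from the connection's ipvs context. A
sketch, assuming int ip_local_out(struct net *, struct sock *, struct sk_buff *):

/* Sketch only: the new output call convention. */
static int example_xmit_finish(struct ip_vs_conn *cp, struct sk_buff *skb)
{
	return ip_local_out(cp->ipvs->net, skb->sk, skb);
}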
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 09d1d19..3cb3cb8 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -940,10 +940,13 @@
 	}
 
 	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
-	if (timeout_ext)
-		timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
-	else
+	if (timeout_ext) {
+		timeouts = nf_ct_timeout_data(timeout_ext);
+		if (unlikely(!timeouts))
+			timeouts = l4proto->get_timeouts(net);
+	} else {
 		timeouts = l4proto->get_timeouts(net);
+	}
 
 	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
 		nf_conntrack_free(ct);
@@ -952,7 +955,8 @@
 	}
 
 	if (timeout_ext)
-		nf_ct_timeout_ext_add(ct, timeout_ext->timeout, GFP_ATOMIC);
+		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
+				      GFP_ATOMIC);
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
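
The hunk above makes early conntrack setup tolerate a timeout extension
whose policy has been unlinked: nf_ct_timeout_data() may now return NULL,
in which case the per-protocol default timeouts are used instead. A
minimal userspace sketch of that fallback shape (all names here are
invented for the demo, not kernel API):

#include <stdio.h>

static unsigned int proto_defaults[] = { 30, 60, 120 };	/* fallback table */

struct timeout_ext {
	unsigned int *timeouts;		/* may be NULL after untimeout() */
};

static unsigned int *pick_timeouts(struct timeout_ext *ext)
{
	if (ext && ext->timeouts)
		return ext->timeouts;	/* custom policy still attached */
	return proto_defaults;		/* policy was unlinked: use defaults */
}

int main(void)
{
	unsigned int custom[] = { 5, 10, 15 };
	struct timeout_ext with = { custom }, without = { NULL };

	printf("custom: %u, fallback: %u\n",
	       pick_timeouts(&with)[0], pick_timeouts(&without)[0]);
	return 0;
}
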
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 94a6654..9f52729 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2133,9 +2133,9 @@
 		       struct nf_conntrack_tuple *tuple,
 		       struct nf_conntrack_tuple *mask);
 
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
 static size_t
-ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+ctnetlink_glue_build_size(const struct nf_conn *ct)
 {
 	return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
 	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
@@ -2162,8 +2162,19 @@
 	       ;
 }
 
-static int
-ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
+static struct nf_conn *ctnetlink_glue_get_ct(const struct sk_buff *skb,
+					     enum ip_conntrack_info *ctinfo)
+{
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, ctinfo);
+	if (ct && nf_ct_is_untracked(ct))
+		ct = NULL;
+
+	return ct;
+}
+
+static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
 	const struct nf_conntrack_zone *zone;
 	struct nlattr *nest_parms;
@@ -2236,7 +2247,32 @@
 }
 
 static int
-ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
+ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
+		     enum ip_conntrack_info ctinfo,
+		     u_int16_t ct_attr, u_int16_t ct_info_attr)
+{
+	struct nlattr *nest_parms;
+
+	nest_parms = nla_nest_start(skb, ct_attr | NLA_F_NESTED);
+	if (!nest_parms)
+		goto nla_put_failure;
+
+	if (__ctnetlink_glue_build(skb, ct) < 0)
+		goto nla_put_failure;
+
+	nla_nest_end(skb, nest_parms);
+
+	if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo)))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -ENOSPC;
+}
+
+static int
+ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
 {
 	int err;
 
@@ -2276,7 +2312,7 @@
 }
 
 static int
-ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
+ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
 {
 	struct nlattr *cda[CTA_MAX+1];
 	int ret;
@@ -2286,16 +2322,16 @@
 		return ret;
 
 	spin_lock_bh(&nf_conntrack_expect_lock);
-	ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+	ret = ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct);
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 
 	return ret;
 }
 
-static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
-				       const struct nf_conn *ct,
-				       struct nf_conntrack_tuple *tuple,
-				       struct nf_conntrack_tuple *mask)
+static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
+				    const struct nf_conn *ct,
+				    struct nf_conntrack_tuple *tuple,
+				    struct nf_conntrack_tuple *mask)
 {
 	int err;
 
@@ -2309,8 +2345,8 @@
 }
 
 static int
-ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
-				u32 portid, u32 report)
+ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+			     u32 portid, u32 report)
 {
 	struct nlattr *cda[CTA_EXPECT_MAX+1];
 	struct nf_conntrack_tuple tuple, mask;
@@ -2322,8 +2358,8 @@
 	if (err < 0)
 		return err;
 
-	err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
-					  ct, &tuple, &mask);
+	err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda,
+				       ct, &tuple, &mask);
 	if (err < 0)
 		return err;
 
@@ -2350,14 +2386,24 @@
 	return 0;
 }
 
-static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
-	.build_size	= ctnetlink_nfqueue_build_size,
-	.build		= ctnetlink_nfqueue_build,
-	.parse		= ctnetlink_nfqueue_parse,
-	.attach_expect	= ctnetlink_nfqueue_attach_expect,
-	.seq_adjust	= nf_ct_tcp_seqadj_set,
+static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
+				  enum ip_conntrack_info ctinfo, int diff)
+{
+	if (!(ct->status & IPS_NAT_MASK))
+		return;
+
+	nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
+}
+
+static struct nfnl_ct_hook ctnetlink_glue_hook = {
+	.get_ct		= ctnetlink_glue_get_ct,
+	.build_size	= ctnetlink_glue_build_size,
+	.build		= ctnetlink_glue_build,
+	.parse		= ctnetlink_glue_parse,
+	.attach_expect	= ctnetlink_glue_attach_expect,
+	.seq_adjust	= ctnetlink_glue_seqadj,
 };
-#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+#endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
 
 /***********************************************************************
  * EXPECT
@@ -3341,9 +3387,9 @@
 		pr_err("ctnetlink_init: cannot register pernet operations\n");
 		goto err_unreg_exp_subsys;
 	}
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
 	/* setup interaction between nf_queue and nf_conntrack_netlink. */
-	RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
+	RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
 #endif
 	return 0;
 
@@ -3362,8 +3408,8 @@
 	unregister_pernet_subsys(&ctnetlink_net_ops);
 	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
 	nfnetlink_subsys_unregister(&ctnl_subsys);
-#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
-	RCU_INIT_POINTER(nfq_ct_hook, NULL);
+#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
+	RCU_INIT_POINTER(nfnl_ct_hook, NULL);
 #endif
 }
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 9f3c3c2..5baa8e2 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -69,19 +69,14 @@
 			dev_put(physdev);
 	}
 #endif
-	/* Drop reference to owner of hook which queued us. */
-	module_put(entry->elem->owner);
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
 
 /* Bump dev refs so they don't vanish while packet is out */
-bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 {
 	struct nf_hook_state *state = &entry->state;
 
-	if (!try_module_get(entry->elem->owner))
-		return false;
-
 	if (state->in)
 		dev_hold(state->in);
 	if (state->out)
@@ -100,8 +95,6 @@
 			dev_hold(physdev);
 	}
 #endif
-
-	return true;
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
@@ -131,22 +124,20 @@
 	const struct nf_queue_handler *qh;
 
 	/* QUEUE == DROP if no one is waiting, to be safe. */
-	rcu_read_lock();
-
 	qh = rcu_dereference(queue_handler);
 	if (!qh) {
 		status = -ESRCH;
-		goto err_unlock;
+		goto err;
 	}
 
 	afinfo = nf_get_afinfo(state->pf);
 	if (!afinfo)
-		goto err_unlock;
+		goto err;
 
 	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
 	if (!entry) {
 		status = -ENOMEM;
-		goto err_unlock;
+		goto err;
 	}
 
 	*entry = (struct nf_queue_entry) {
@@ -156,16 +147,11 @@
 		.size	= sizeof(*entry) + afinfo->route_key_size,
 	};
 
-	if (!nf_queue_entry_get_refs(entry)) {
-		status = -ECANCELED;
-		goto err_unlock;
-	}
+	nf_queue_entry_get_refs(entry);
 	skb_dst_force(skb);
 	afinfo->saveroute(skb, entry);
 	status = qh->outfn(entry, queuenum);
 
-	rcu_read_unlock();
-
 	if (status < 0) {
 		nf_queue_entry_release_refs(entry);
 		goto err;
@@ -173,8 +159,6 @@
 
 	return 0;
 
-err_unlock:
-	rcu_read_unlock();
 err:
 	kfree(entry);
 	return status;
@@ -187,19 +171,15 @@
 	const struct nf_afinfo *afinfo;
 	int err;
 
-	rcu_read_lock();
-
 	nf_queue_entry_release_refs(entry);
 
 	/* Continue traversal iff userspace said ok... */
-	if (verdict == NF_REPEAT) {
-		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
-		verdict = NF_ACCEPT;
-	}
+	if (verdict == NF_REPEAT)
+		verdict = elem->hook(elem->priv, skb, &entry->state);
 
 	if (verdict == NF_ACCEPT) {
 		afinfo = nf_get_afinfo(entry->state.pf);
-		if (!afinfo || afinfo->reroute(skb, entry) < 0)
+		if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
 			verdict = NF_DROP;
 	}
 
@@ -222,8 +202,6 @@
 		err = nf_queue(skb, elem, &entry->state,
 			       verdict >> NF_VERDICT_QBITS);
 		if (err < 0) {
-			if (err == -ECANCELED)
-				goto next_hook;
 			if (err == -ESRCH &&
 			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
 				goto next_hook;
@@ -235,7 +213,7 @@
 	default:
 		kfree_skb(skb);
 	}
-	rcu_read_unlock();
+
 	kfree(entry);
 }
 EXPORT_SYMBOL(nf_reinject);
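
Two ideas drive the nf_queue.c hunks above: queued entries no longer pin
the hook's module (so the try_module_get()/module_put() pair and the
-ECANCELED path disappear), and NF_REPEAT is handled by re-invoking the
saved hook rather than rewinding the hook list. A standalone C model of
the new NF_REPEAT flow, with invented names and verdict values:

#include <stdio.h>

enum verdict { V_ACCEPT, V_DROP, V_REPEAT };

struct hook {
	enum verdict (*fn)(void *priv);
	void *priv;
};

static enum verdict reinject(struct hook *h, enum verdict v)
{
	if (v == V_REPEAT)
		v = h->fn(h->priv);	/* re-run the hook, take its verdict */
	return v;
}

static enum verdict accept_second_time(void *priv)
{
	int *calls = priv;

	return ++(*calls) >= 2 ? V_ACCEPT : V_REPEAT;
}

int main(void)
{
	int calls = 1;			/* the first pass already happened */
	struct hook h = { accept_second_time, &calls };

	printf("verdict=%d after %d calls\n", reinject(&h, V_REPEAT), calls);
	return 0;
}
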
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4a41eb9..93cc473 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1433,7 +1433,6 @@
 		for (i = 0; i < afi->nops; i++) {
 			ops = &basechain->ops[i];
 			ops->pf		= family;
-			ops->owner	= afi->owner;
 			ops->hooknum	= hooknum;
 			ops->priority	= priority;
 			ops->priv	= chain;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 70277b1..f1d9e88 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -64,7 +64,7 @@
 EXPORT_SYMBOL_GPL(nfnl_unlock);
 
 #ifdef CONFIG_PROVE_LOCKING
-int lockdep_nfnl_is_held(u8 subsys_id)
+bool lockdep_nfnl_is_held(u8 subsys_id)
 {
 	return lockdep_is_held(&table[subsys_id].mutex);
 }
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 476accd..c7a2d0e 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -291,6 +291,34 @@
 	return ret;
 }
 
+static void untimeout(struct nf_conntrack_tuple_hash *i,
+		      struct ctnl_timeout *timeout)
+{
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
+	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);
+
+	if (timeout_ext && (!timeout || timeout_ext->timeout == timeout))
+		RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+}
+
+static void ctnl_untimeout(struct ctnl_timeout *timeout)
+{
+	struct nf_conntrack_tuple_hash *h;
+	const struct hlist_nulls_node *nn;
+	int i;
+
+	local_bh_disable();
+	for (i = 0; i < init_net.ct.htable_size; i++) {
+		spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+		if (i < init_net.ct.htable_size) {
+			hlist_nulls_for_each_entry(h, nn, &init_net.ct.hash[i], hnnode)
+				untimeout(h, timeout);
+		}
+		spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+	}
+	local_bh_enable();
+}
+
 /* try to delete object, fail if it is still in use. */
 static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
 {
@@ -301,6 +329,7 @@
 		/* We are protected by nfnl mutex. */
 		list_del_rcu(&timeout->head);
 		nf_ct_l4proto_put(timeout->l4proto);
+		ctnl_untimeout(timeout);
 		kfree_rcu(timeout, rcu_head);
 	} else {
 		/* still in use, restore reference counter. */
@@ -567,6 +596,10 @@
 	pr_info("cttimeout: unregistering from nfnetlink.\n");
 
 	nfnetlink_subsys_unregister(&cttimeout_subsys);
+
+	/* Make sure no conntrack objects refer to custom timeouts anymore. */
+	ctnl_untimeout(NULL);
+
 	list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
 		list_del_rcu(&cur->head);
 		/* We are sure that our objects have no clients at this point,
@@ -579,6 +612,7 @@
 	RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
 	RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+	rcu_barrier();
 }
 
 module_init(cttimeout_init);
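
ctnl_untimeout() above scans the conntrack hash one bucket at a time,
taking only the lock that covers the current bucket (locks are shared
modulo CONNTRACK_LOCKS), so the whole table is never frozen at once. A
userspace sketch of that locking pattern, with pthread mutexes standing
in for spinlocks and made-up sizes:

#include <pthread.h>
#include <stdio.h>

#define NBUCKETS 16
#define NLOCKS   4	/* several buckets share one lock */

static pthread_mutex_t locks[NLOCKS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static int bucket[NBUCKETS];	/* stand-in for per-bucket entry state */

static void untimeout_all(void)
{
	for (int i = 0; i < NBUCKETS; i++) {
		pthread_mutex_lock(&locks[i % NLOCKS]);
		bucket[i] = 0;	/* clear the per-entry timeout pointer */
		pthread_mutex_unlock(&locks[i % NLOCKS]);
	}
}

int main(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		bucket[i] = 1;
	untimeout_all();
	printf("bucket[7]=%d\n", bucket[7]);
	return 0;
}
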
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 4670821..06eb48f 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -27,6 +27,7 @@
 #include <net/netlink.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_log.h>
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/spinlock.h>
 #include <linux/sysctl.h>
 #include <linux/proc_fs.h>
@@ -401,7 +402,9 @@
 			unsigned int hooknum,
 			const struct net_device *indev,
 			const struct net_device *outdev,
-			const char *prefix, unsigned int plen)
+			const char *prefix, unsigned int plen,
+			const struct nfnl_ct_hook *nfnl_ct,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
 {
 	struct nfulnl_msg_packet_hdr pmsg;
 	struct nlmsghdr *nlh;
@@ -538,9 +541,9 @@
 
 	if (skb->tstamp.tv64) {
 		struct nfulnl_msg_packet_timestamp ts;
-		struct timeval tv = ktime_to_timeval(skb->tstamp);
-		ts.sec = cpu_to_be64(tv.tv_sec);
-		ts.usec = cpu_to_be64(tv.tv_usec);
+		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+		ts.sec = cpu_to_be64(kts.tv_sec);
+		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 
 		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
 			goto nla_put_failure;
@@ -575,6 +578,10 @@
 			 htonl(atomic_inc_return(&log->global_seq))))
 		goto nla_put_failure;
 
+	if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
+				 NFULA_CT, NFULA_CT_INFO) < 0)
+		goto nla_put_failure;
+
 	if (data_len) {
 		struct nlattr *nla;
 		int size = nla_attr_size(data_len);
@@ -620,12 +627,16 @@
 		  const struct nf_loginfo *li_user,
 		  const char *prefix)
 {
-	unsigned int size, data_len;
+	size_t size;
+	unsigned int data_len;
 	struct nfulnl_instance *inst;
 	const struct nf_loginfo *li;
 	unsigned int qthreshold;
 	unsigned int plen;
 	struct nfnl_log_net *log = nfnl_log_pernet(net);
+	const struct nfnl_ct_hook *nfnl_ct = NULL;
+	struct nf_conn *ct = NULL;
+	enum ip_conntrack_info uninitialized_var(ctinfo);
 
 	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
 		li = li_user;
@@ -671,6 +682,14 @@
 		size += nla_total_size(sizeof(u_int32_t));
 	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
 		size += nla_total_size(sizeof(u_int32_t));
+	if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
+		nfnl_ct = rcu_dereference(nfnl_ct_hook);
+		if (nfnl_ct != NULL) {
+			ct = nfnl_ct->get_ct(skb, &ctinfo);
+			if (ct != NULL)
+				size += nfnl_ct->build_size(ct);
+		}
+	}
 
 	qthreshold = inst->qthreshold;
 	/* per-rule qthreshold overrides per-instance */
@@ -715,7 +734,8 @@
 	inst->qlen++;
 
 	__build_packet_message(log, inst, skb, data_len, pf,
-				hooknum, in, out, prefix, plen);
+				hooknum, in, out, prefix, plen,
+				nfnl_ct, ct, ctinfo);
 
 	if (inst->qlen >= qthreshold)
 		__nfulnl_flush(inst);
@@ -805,6 +825,7 @@
 	struct net *net = sock_net(ctnl);
 	struct nfnl_log_net *log = nfnl_log_pernet(net);
 	int ret = 0;
+	u16 flags;
 
 	if (nfula[NFULA_CFG_CMD]) {
 		u_int8_t pf = nfmsg->nfgen_family;
@@ -826,6 +847,28 @@
 		goto out_put;
 	}
 
+	/* Check that we support these flags in the first place; their
+	 * dependencies must be present too, so the request stays atomic.
+	 */
+	if (nfula[NFULA_CFG_FLAGS]) {
+		flags = ntohs(nla_get_be16(nfula[NFULA_CFG_FLAGS]));
+
+		if ((flags & NFULNL_CFG_F_CONNTRACK) &&
+		    !rcu_access_pointer(nfnl_ct_hook)) {
+#ifdef CONFIG_MODULES
+			nfnl_unlock(NFNL_SUBSYS_ULOG);
+			request_module("ip_conntrack_netlink");
+			nfnl_lock(NFNL_SUBSYS_ULOG);
+			if (rcu_access_pointer(nfnl_ct_hook)) {
+				ret = -EAGAIN;
+				goto out_put;
+			}
+#endif
+			ret = -EOPNOTSUPP;
+			goto out_put;
+		}
+	}
+
 	if (cmd != NULL) {
 		switch (cmd->command) {
 		case NFULNL_CFG_CMD_BIND:
@@ -854,16 +897,15 @@
 			ret = -ENOTSUPP;
 			break;
 		}
+	} else if (!inst) {
+		ret = -ENODEV;
+		goto out;
 	}
 
 	if (nfula[NFULA_CFG_MODE]) {
-		struct nfulnl_msg_config_mode *params;
-		params = nla_data(nfula[NFULA_CFG_MODE]);
+		struct nfulnl_msg_config_mode *params =
+			nla_data(nfula[NFULA_CFG_MODE]);
 
-		if (!inst) {
-			ret = -ENODEV;
-			goto out;
-		}
 		nfulnl_set_mode(inst, params->copy_mode,
 				ntohl(params->copy_range));
 	}
@@ -871,42 +913,23 @@
 	if (nfula[NFULA_CFG_TIMEOUT]) {
 		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);
 
-		if (!inst) {
-			ret = -ENODEV;
-			goto out;
-		}
 		nfulnl_set_timeout(inst, ntohl(timeout));
 	}
 
 	if (nfula[NFULA_CFG_NLBUFSIZ]) {
 		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);
 
-		if (!inst) {
-			ret = -ENODEV;
-			goto out;
-		}
 		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
 	}
 
 	if (nfula[NFULA_CFG_QTHRESH]) {
 		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);
 
-		if (!inst) {
-			ret = -ENODEV;
-			goto out;
-		}
 		nfulnl_set_qthresh(inst, ntohl(qthresh));
 	}
 
-	if (nfula[NFULA_CFG_FLAGS]) {
-		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);
-
-		if (!inst) {
-			ret = -ENODEV;
-			goto out;
-		}
-		nfulnl_set_flags(inst, ntohs(flags));
-	}
+	if (nfula[NFULA_CFG_FLAGS])
+		nfulnl_set_flags(inst, flags);
 
 out_put:
 	instance_put(inst);
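
The config-path rework above checks NFULA_CFG_FLAGS before any command
runs, so an unsupported flag can no longer leave a half-applied instance
configuration behind. A toy model of that validate-then-commit ordering
(flag constants are illustrative):

#include <stdio.h>
#include <errno.h>

#define CFG_F_CONNTRACK 0x4
#define SUPPORTED_FLAGS 0x3	/* conntrack glue not available in this demo */

static int apply_config(unsigned int flags, unsigned int *state)
{
	if (flags & ~SUPPORTED_FLAGS)
		return -EOPNOTSUPP;	/* fail before mutating anything */
	*state = flags;			/* only now commit the change */
	return 0;
}

int main(void)
{
	unsigned int state = 0;
	int rc;

	rc = apply_config(CFG_F_CONNTRACK, &state);
	printf("bad: %d, state=%u\n", rc, state);
	rc = apply_config(0x1, &state);
	printf("good: %d, state=%u\n", rc, state);
	return 0;
}
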
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue.c
similarity index 95%
rename from net/netfilter/nfnetlink_queue_core.c
rename to net/netfilter/nfnetlink_queue.c
index 41583e3..7d81d28 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -28,12 +28,12 @@
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_queue.h>
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/list.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/netfilter/nf_queue.h>
 #include <net/netns/generic.h>
-#include <net/netfilter/nfnetlink_queue.h>
 
 #include <linux/atomic.h>
 
@@ -313,6 +313,7 @@
 	struct net_device *outdev;
 	struct nf_conn *ct = NULL;
 	enum ip_conntrack_info uninitialized_var(ctinfo);
+	struct nfnl_ct_hook *nfnl_ct;
 	bool csum_verify;
 	char *secdata = NULL;
 	u32 seclen = 0;
@@ -364,8 +365,14 @@
 		break;
 	}
 
-	if (queue->flags & NFQA_CFG_F_CONNTRACK)
-		ct = nfqnl_ct_get(entskb, &size, &ctinfo);
+	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
+		nfnl_ct = rcu_dereference(nfnl_ct_hook);
+		if (nfnl_ct != NULL) {
+			ct = nfnl_ct->get_ct(entskb, &ctinfo);
+			if (ct != NULL)
+				size += nfnl_ct->build_size(ct);
+		}
+	}
 
 	if (queue->flags & NFQA_CFG_F_UID_GID) {
 		size +=  (nla_total_size(sizeof(u_int32_t))	/* uid */
@@ -493,9 +500,10 @@
 
 	if (entskb->tstamp.tv64) {
 		struct nfqnl_msg_packet_timestamp ts;
-		struct timeval tv = ktime_to_timeval(entskb->tstamp);
-		ts.sec = cpu_to_be64(tv.tv_sec);
-		ts.usec = cpu_to_be64(tv.tv_usec);
+		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
+
+		ts.sec = cpu_to_be64(kts.tv_sec);
+		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
 
 		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
 			goto nla_put_failure;
@@ -508,7 +516,7 @@
 	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
 		goto nla_put_failure;
 
-	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
 		goto nla_put_failure;
 
 	if (cap_len > data_len &&
@@ -598,12 +606,9 @@
 nf_queue_entry_dup(struct nf_queue_entry *e)
 {
 	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
-	if (entry) {
-		if (nf_queue_entry_get_refs(entry))
-			return entry;
-		kfree(entry);
-	}
-	return NULL;
+	if (entry)
+		nf_queue_entry_get_refs(entry);
+	return entry;
 }
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
@@ -698,7 +703,7 @@
 	nf_bridge_adjust_skb_data(skb);
 	segs = skb_gso_segment(skb, 0);
 	/* Does not use PTR_ERR to limit the number of error codes that can be
-	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
+	 * returned by nf_queue.  For instance, callers rely on -ESRCH to
 	 * mean 'ignore this hook'.
 	 */
 	if (IS_ERR_OR_NULL(segs))
@@ -1001,6 +1006,28 @@
 	return 0;
 }
 
+static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
+				      const struct nlmsghdr *nlh,
+				      const struct nlattr * const nfqa[],
+				      struct nf_queue_entry *entry,
+				      enum ip_conntrack_info *ctinfo)
+{
+	struct nf_conn *ct;
+
+	ct = nfnl_ct->get_ct(entry->skb, ctinfo);
+	if (ct == NULL)
+		return NULL;
+
+	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
+		return NULL;
+
+	if (nfqa[NFQA_EXP])
+		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
+				      NETLINK_CB(entry->skb).portid,
+				      nlmsg_report(nlh));
+	return ct;
+}
+
 static int
 nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
 		   const struct nlmsghdr *nlh,
@@ -1014,6 +1041,7 @@
 	unsigned int verdict;
 	struct nf_queue_entry *entry;
 	enum ip_conntrack_info uninitialized_var(ctinfo);
+	struct nfnl_ct_hook *nfnl_ct;
 	struct nf_conn *ct = NULL;
 
 	struct net *net = sock_net(ctnl);
@@ -1037,12 +1065,10 @@
 		return -ENOENT;
 
 	if (nfqa[NFQA_CT]) {
-		ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
-		if (ct && nfqa[NFQA_EXP]) {
-			nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
-					    NETLINK_CB(skb).portid,
-					    nlmsg_report(nlh));
-		}
+		/* rcu lock already held from nfnl->call_rcu. */
+		nfnl_ct = rcu_dereference(nfnl_ct_hook);
+		if (nfnl_ct != NULL)
+			ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
 	}
 
 	if (nfqa[NFQA_PAYLOAD]) {
@@ -1053,8 +1079,8 @@
 				 payload_len, entry, diff) < 0)
 			verdict = NF_DROP;
 
-		if (ct)
-			nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
+		if (ct && diff)
+			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
 	}
 
 	if (nfqa[NFQA_MARK])
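
The timestamp hunks in this file (and the matching one in
nfnetlink_log.c above) swap the 32-bit-unsafe timeval conversion for
timespec64 and derive the wire format's sec/usec pair from nanoseconds.
The same arithmetic as a standalone program, using an example ktime
value:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC  1000000000LL
#define NSEC_PER_USEC 1000LL

int main(void)
{
	int64_t tstamp_ns = 1446198896123456789LL;	/* example ktime, ns */
	uint64_t sec  = tstamp_ns / NSEC_PER_SEC;
	uint64_t usec = (tstamp_ns % NSEC_PER_SEC) / NSEC_PER_USEC;

	printf("sec=%llu usec=%llu\n",
	       (unsigned long long)sec, (unsigned long long)usec);
	return 0;
}
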
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
deleted file mode 100644
index 96cac50..0000000
--- a/net/netfilter/nfnetlink_queue_ct.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/skbuff.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_queue.h>
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nfnetlink_queue.h>
-
-struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
-			     enum ip_conntrack_info *ctinfo)
-{
-	struct nfq_ct_hook *nfq_ct;
-	struct nf_conn *ct;
-
-	/* rcu_read_lock()ed by __nf_queue already. */
-	nfq_ct = rcu_dereference(nfq_ct_hook);
-	if (nfq_ct == NULL)
-		return NULL;
-
-	ct = nf_ct_get(entskb, ctinfo);
-	if (ct) {
-		if (!nf_ct_is_untracked(ct))
-			*size += nfq_ct->build_size(ct);
-		else
-			ct = NULL;
-	}
-	return ct;
-}
-
-struct nf_conn *
-nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
-	       enum ip_conntrack_info *ctinfo)
-{
-	struct nfq_ct_hook *nfq_ct;
-	struct nf_conn *ct;
-
-	/* rcu_read_lock()ed by __nf_queue already. */
-	nfq_ct = rcu_dereference(nfq_ct_hook);
-	if (nfq_ct == NULL)
-		return NULL;
-
-	ct = nf_ct_get(skb, ctinfo);
-	if (ct && !nf_ct_is_untracked(ct))
-		nfq_ct->parse(attr, ct);
-
-	return ct;
-}
-
-int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
-		 enum ip_conntrack_info ctinfo)
-{
-	struct nfq_ct_hook *nfq_ct;
-	struct nlattr *nest_parms;
-	u_int32_t tmp;
-
-	nfq_ct = rcu_dereference(nfq_ct_hook);
-	if (nfq_ct == NULL)
-		return 0;
-
-	nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
-	if (!nest_parms)
-		goto nla_put_failure;
-
-	if (nfq_ct->build(skb, ct) < 0)
-		goto nla_put_failure;
-
-	nla_nest_end(skb, nest_parms);
-
-	tmp = ctinfo;
-	if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
-		goto nla_put_failure;
-
-	return 0;
-
-nla_put_failure:
-	return -1;
-}
-
-void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
-			 enum ip_conntrack_info ctinfo, int diff)
-{
-	struct nfq_ct_hook *nfq_ct;
-
-	nfq_ct = rcu_dereference(nfq_ct_hook);
-	if (nfq_ct == NULL)
-		return;
-
-	if ((ct->status & IPS_NAT_MASK) && diff)
-		nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
-}
-
-int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
-			u32 portid, u32 report)
-{
-	struct nfq_ct_hook *nfq_ct;
-
-	if (nf_ct_is_untracked(ct))
-		return 0;
-
-	nfq_ct = rcu_dereference(nfq_ct_hook);
-	if (nfq_ct == NULL)
-		return -EOPNOTSUPP;
-
-	return nfq_ct->attach_expect(attr, ct, portid, report);
-}
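
The file deleted above was a set of thin wrappers that re-resolved
nfq_ct_hook on every call; after this series, callers resolve the shared
nfnl_ct_hook once and invoke its methods directly. A sketch of that
shape, with a stub ops table standing in for the real RCU-protected
hook:

#include <stdio.h>
#include <stddef.h>

struct ct_hook {
	size_t (*build_size)(int ct);
	int    (*build)(int ct);
};

static size_t demo_size(int ct)  { return ct ? 64 : 0; }
static int    demo_build(int ct) { return ct ? 0 : -1; }

static struct ct_hook demo_hook = { demo_size, demo_build };
static struct ct_hook *registered = &demo_hook;	/* normally RCU-managed */

int main(void)
{
	struct ct_hook *h = registered;		/* resolve the hook once */

	if (h)					/* then call it directly */
		printf("size=%zu rc=%d\n", h->build_size(1), h->build(1));
	return 0;
}
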
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 0bc19f97..759ca52 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -31,7 +31,7 @@
 					pkt->hook);
 			break;
 		case NFT_REJECT_TCP_RST:
-			nf_send_reset(pkt->skb, pkt->hook);
+			nf_send_reset(pkt->net, pkt->skb, pkt->hook);
 			break;
 		case NFT_REJECT_ICMPX_UNREACH:
 			nf_send_unreach(pkt->skb,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 9b42b5e..d4aaad7 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1193,7 +1193,6 @@
 		if (!(hook_mask & 1))
 			continue;
 		ops[i].hook     = fn;
-		ops[i].owner    = table->me;
 		ops[i].pf       = table->af;
 		ops[i].hooknum  = hooknum;
 		ops[i].priority = table->priority;
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index faf32d8..e7ac07e 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -171,6 +171,9 @@
 	if (timeout_ext == NULL)
 		ret = -ENOMEM;
 
+	rcu_read_unlock();
+	return ret;
+
 err_put_timeout:
 	__xt_ct_tg_timeout_put(timeout);
 out:
@@ -318,8 +321,10 @@
 
 	if (timeout_put) {
 		timeout_ext = nf_ct_timeout_find(ct);
-		if (timeout_ext)
+		if (timeout_ext) {
 			timeout_put(timeout_ext->timeout);
+			RCU_INIT_POINTER(timeout_ext->timeout, NULL);
+		}
 	}
 	rcu_read_unlock();
 #endif
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 452ba2a..71a9d95 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -48,6 +48,7 @@
 ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_ipvs_mtinfo *data = par->matchinfo;
+	struct netns_ipvs *ipvs = net_ipvs(par->net);
 	/* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
 	const u_int8_t family = par->family;
 	struct ip_vs_iphdr iph;
@@ -85,7 +86,7 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(family, skb, &iph);
+	cp = pp->conn_out_get(ipvs, family, skb, &iph);
 	if (unlikely(cp == NULL)) {
 		match = false;
 		goto out;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 8f060d7..fafe33b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2371,7 +2371,7 @@
 		int pos, idx, shift;
 
 		err = 0;
-		netlink_table_grab();
+		netlink_lock_table();
 		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
 			if (len - pos < sizeof(u32))
 				break;
@@ -2386,7 +2386,7 @@
 		}
 		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
 			err = -EFAULT;
-		netlink_table_ungrab();
+		netlink_unlock_table();
 		break;
 	}
 	case NETLINK_CAP_ACK:
@@ -2785,6 +2785,7 @@
 	struct sk_buff *skb = NULL;
 	struct nlmsghdr *nlh;
 	int len, err = -ENOBUFS;
+	int alloc_min_size;
 	int alloc_size;
 
 	mutex_lock(nlk->cb_mutex);
@@ -2793,9 +2794,6 @@
 		goto errout_skb;
 	}
 
-	cb = &nlk->cb;
-	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
-
 	if (!netlink_rx_is_mmaped(sk) &&
 	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto errout_skb;
@@ -2805,23 +2803,35 @@
 	 * to reduce number of system calls on dump operations, if user
 	 * ever provided a big enough buffer.
 	 */
-	if (alloc_size < nlk->max_recvmsg_len) {
-		skb = netlink_alloc_skb(sk,
-					nlk->max_recvmsg_len,
-					nlk->portid,
+	cb = &nlk->cb;
+	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+
+	if (alloc_min_size < nlk->max_recvmsg_len) {
+		alloc_size = nlk->max_recvmsg_len;
+		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
 					GFP_KERNEL |
 					__GFP_NOWARN |
 					__GFP_NORETRY);
-		/* available room should be exact amount to avoid MSG_TRUNC */
-		if (skb)
-			skb_reserve(skb, skb_tailroom(skb) -
-					 nlk->max_recvmsg_len);
 	}
-	if (!skb)
+	if (!skb) {
+		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
 					GFP_KERNEL);
+	}
 	if (!skb)
 		goto errout_skb;
+
+	/* Trim skb to allocated size. User is expected to provide buffer as
+	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
+	 * netlink_recvmsg())). dump will pack as many smaller messages as
+	 * can fit within the allocated skb. skb is typically allocated
+	 * with larger space than required (could be nearly 2x the requested
+	 * size after rounding up to the next power of 2). Allowing dump
+	 * to use the excess space makes it difficult for a user to have a
+	 * reasonable static buffer based on the expected largest dump of a
+	 * single netdev. The outcome is a MSG_TRUNC error.
+	 */
+	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
 	netlink_skb_set_owner_r(skb, sk);
 
 	len = cb->dump(skb, cb);
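
The netlink_dump() change reserves the skb's surplus tailroom so that,
whichever of the two allocations succeeded, the dump packs into exactly
alloc_size bytes and a correctly sized user buffer no longer trips
MSG_TRUNC. The reserve arithmetic, worked through with example numbers:

#include <stdio.h>

int main(void)
{
	int alloc_size = 16384;	/* what the dump asked for */
	int tailroom   = 32768;	/* what the allocator actually returned */
	int reserve    = tailroom - alloc_size;

	printf("reserve %d bytes; usable room becomes %d\n",
	       reserve, tailroom - reserve);
	return 0;
}
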
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 75724a9..bc0e504 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -39,7 +39,7 @@
 EXPORT_SYMBOL(genl_unlock);
 
 #ifdef CONFIG_LOCKDEP
-int lockdep_genl_is_held(void)
+bool lockdep_genl_is_held(void)
 {
 	return lockdep_is_held(&genl_mutex);
 }
diff --git a/net/nfc/core.c b/net/nfc/core.c
index cff3f16..1fe3d3b 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -449,7 +449,7 @@
  * @dev: The nfc device that found the target
  * @target_idx: index of the target that must be deactivated
  */
-int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
+int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode)
 {
 	int rc = 0;
 
@@ -476,7 +476,7 @@
 	if (dev->ops->check_presence)
 		del_timer_sync(&dev->check_pres_timer);
 
-	dev->ops->deactivate_target(dev, dev->active_target);
+	dev->ops->deactivate_target(dev, dev->active_target, mode);
 	dev->active_target = NULL;
 
 error:
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
index 009bcf3..23c2a11 100644
--- a/net/nfc/digital_core.c
+++ b/net/nfc/digital_core.c
@@ -631,7 +631,8 @@
 }
 
 static void digital_deactivate_target(struct nfc_dev *nfc_dev,
-				      struct nfc_target *target)
+				      struct nfc_target *target,
+				      u8 mode)
 {
 	struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
 
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 6e061da..2b0f0ac 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -678,7 +678,8 @@
 }
 
 static void hci_deactivate_target(struct nfc_dev *nfc_dev,
-				  struct nfc_target *target)
+				  struct nfc_target *target,
+				  u8 mode)
 {
 }
 
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
index 1b90c05..1399a03 100644
--- a/net/nfc/hci/llc.c
+++ b/net/nfc/hci/llc.c
@@ -144,11 +144,13 @@
 {
 	return llc->ops->start(llc);
 }
+EXPORT_SYMBOL(nfc_llc_start);
 
 inline int nfc_llc_stop(struct nfc_llc *llc)
 {
 	return llc->ops->stop(llc);
 }
+EXPORT_SYMBOL(nfc_llc_stop);
 
 inline void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
 {
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index 901c1dd..85d4819 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -12,7 +12,7 @@
 config NFC_NCI_SPI
 	depends on NFC_NCI && SPI
 	select CRC_CCITT
-	bool "NCI over SPI protocol support"
+	tristate "NCI over SPI protocol support"
 	default n
 	help
 	  NCI (NFC Controller Interface) is a communication protocol between
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
index b4b85b8..0ca31d9 100644
--- a/net/nfc/nci/Makefile
+++ b/net/nfc/nci/Makefile
@@ -6,7 +6,8 @@
 
 nci-objs := core.o data.o lib.o ntf.o rsp.o hci.o
 
-nci-$(CONFIG_NFC_NCI_SPI) += spi.o
+nci_spi-y += spi.o
+obj-$(CONFIG_NFC_NCI_SPI) += nci_spi.o
 
 nci_uart-y += uart.o
 obj-$(CONFIG_NFC_NCI_UART) += nci_uart.o
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 943889b..10c99a5 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -64,6 +64,19 @@
 	return NULL;
 }
 
+int nci_get_conn_info_by_id(struct nci_dev *ndev, u8 id)
+{
+	struct nci_conn_info *conn_info;
+
+	list_for_each_entry(conn_info, &ndev->conn_info_list, list) {
+		if (conn_info->id == id)
+			return conn_info->conn_id;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(nci_get_conn_info_by_id);
+
 /* ---- NCI requests ---- */
 
 void nci_req_complete(struct nci_dev *ndev, int result)
@@ -325,32 +338,45 @@
 		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
 }
 
-struct nci_prop_cmd_param {
+struct nci_cmd_param {
 	__u16 opcode;
 	size_t len;
 	__u8 *payload;
 };
 
-static void nci_prop_cmd_req(struct nci_dev *ndev, unsigned long opt)
+static void nci_generic_req(struct nci_dev *ndev, unsigned long opt)
 {
-	struct nci_prop_cmd_param *param = (struct nci_prop_cmd_param *)opt;
+	struct nci_cmd_param *param = (struct nci_cmd_param *)opt;
 
 	nci_send_cmd(ndev, param->opcode, param->len, param->payload);
 }
 
 int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload)
 {
-	struct nci_prop_cmd_param param;
+	struct nci_cmd_param param;
 
 	param.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, oid);
 	param.len = len;
 	param.payload = payload;
 
-	return __nci_request(ndev, nci_prop_cmd_req, (unsigned long)&param,
+	return __nci_request(ndev, nci_generic_req, (unsigned long)&param,
 			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
 }
 EXPORT_SYMBOL(nci_prop_cmd);
 
+int nci_core_cmd(struct nci_dev *ndev, __u16 opcode, size_t len, __u8 *payload)
+{
+	struct nci_cmd_param param;
+
+	param.opcode = opcode;
+	param.len = len;
+	param.payload = payload;
+
+	return __nci_request(ndev, nci_generic_req, (unsigned long)&param,
+			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_cmd);
+
 int nci_core_reset(struct nci_dev *ndev)
 {
 	return __nci_request(ndev, nci_reset_req, 0,
@@ -402,9 +429,8 @@
 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
 	}
 
-	if (ndev->ops->post_setup) {
+	if (!rc && ndev->ops->post_setup)
 		rc = ndev->ops->post_setup(ndev);
-	}
 
 	if (!rc) {
 		rc = __nci_request(ndev, nci_init_complete_req, 0,
@@ -540,7 +566,7 @@
 
 int nci_nfcee_discover(struct nci_dev *ndev, u8 action)
 {
-	return nci_request(ndev, nci_nfcee_discover_req, action,
+	return __nci_request(ndev, nci_nfcee_discover_req, action,
 				msecs_to_jiffies(NCI_CMD_TIMEOUT));
 }
 EXPORT_SYMBOL(nci_nfcee_discover);
@@ -561,8 +587,9 @@
 	cmd.nfcee_id = nfcee_id;
 	cmd.nfcee_mode = nfcee_mode;
 
-	return nci_request(ndev, nci_nfcee_mode_set_req, (unsigned long)&cmd,
-			   msecs_to_jiffies(NCI_CMD_TIMEOUT));
+	return __nci_request(ndev, nci_nfcee_mode_set_req,
+			     (unsigned long)&cmd,
+			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
 }
 EXPORT_SYMBOL(nci_nfcee_mode_set);
 
@@ -588,12 +615,21 @@
 	if (!cmd)
 		return -ENOMEM;
 
+	if (!number_destination_params) {
+		kfree(cmd);
+		return -EINVAL;
+	}
+
 	cmd->destination_type = destination_type;
 	cmd->number_destination_params = number_destination_params;
 	memcpy(cmd->params, params, params_len);
 
 	data.cmd = cmd;
-	ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX];
+
+	if (params->length > 0)
+		ndev->cur_id = params->value[DEST_SPEC_PARAMS_ID_INDEX];
+	else
+		ndev->cur_id = 0;
 
 	r = __nci_request(ndev, nci_core_conn_create_req,
 			  (unsigned long)&data,
@@ -612,8 +646,8 @@
 
 int nci_core_conn_close(struct nci_dev *ndev, u8 conn_id)
 {
-	return nci_request(ndev, nci_core_conn_close_req, conn_id,
-				msecs_to_jiffies(NCI_CMD_TIMEOUT));
+	return __nci_request(ndev, nci_core_conn_close_req, conn_id,
+			     msecs_to_jiffies(NCI_CMD_TIMEOUT));
 }
 EXPORT_SYMBOL(nci_core_conn_close);
 
@@ -801,9 +835,11 @@
 }
 
 static void nci_deactivate_target(struct nfc_dev *nfc_dev,
-				  struct nfc_target *target)
+				  struct nfc_target *target,
+				  __u8 mode)
 {
 	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+	u8 nci_mode = NCI_DEACTIVATE_TYPE_IDLE_MODE;
 
 	pr_debug("entry\n");
 
@@ -814,9 +850,14 @@
 
 	ndev->target_active_prot = 0;
 
+	switch (mode) {
+	case NFC_TARGET_MODE_SLEEP:
+		nci_mode = NCI_DEACTIVATE_TYPE_SLEEP_MODE;
+		break;
+	}
+
 	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
-		nci_request(ndev, nci_rf_deactivate_req,
-			    NCI_DEACTIVATE_TYPE_IDLE_MODE,
+		nci_request(ndev, nci_rf_deactivate_req, nci_mode,
 			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 	}
 }
@@ -850,7 +891,7 @@
 	pr_debug("entry\n");
 
 	if (nfc_dev->rf_mode == NFC_RF_INITIATOR) {
-		nci_deactivate_target(nfc_dev, NULL);
+		nci_deactivate_target(nfc_dev, NULL, NCI_DEACTIVATE_TYPE_IDLE_MODE);
 	} else {
 		if (atomic_read(&ndev->state) == NCI_LISTEN_ACTIVE ||
 		    atomic_read(&ndev->state) == NCI_DISCOVERY) {
@@ -1177,7 +1218,7 @@
 }
 EXPORT_SYMBOL(nci_recv_frame);
 
-static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
+int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
 {
 	pr_debug("len %d\n", skb->len);
 
@@ -1195,6 +1236,7 @@
 
 	return ndev->ops->send(ndev, skb);
 }
+EXPORT_SYMBOL(nci_send_frame);
 
 /* Send NCI command */
 int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
@@ -1226,48 +1268,80 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(nci_send_cmd);
 
 /* Proprietary commands API */
-static struct nci_prop_ops *prop_cmd_lookup(struct nci_dev *ndev,
-					    __u16 opcode)
+static struct nci_driver_ops *ops_cmd_lookup(struct nci_driver_ops *ops,
+					     size_t n_ops,
+					     __u16 opcode)
 {
 	size_t i;
-	struct nci_prop_ops *prop_op;
+	struct nci_driver_ops *op;
 
-	if (!ndev->ops->prop_ops || !ndev->ops->n_prop_ops)
+	if (!ops || !n_ops)
 		return NULL;
 
-	for (i = 0; i < ndev->ops->n_prop_ops; i++) {
-		prop_op = &ndev->ops->prop_ops[i];
-		if (prop_op->opcode == opcode)
-			return prop_op;
+	for (i = 0; i < n_ops; i++) {
+		op = &ops[i];
+		if (op->opcode == opcode)
+			return op;
 	}
 
 	return NULL;
 }
 
-int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 rsp_opcode,
-			struct sk_buff *skb)
+static int nci_op_rsp_packet(struct nci_dev *ndev, __u16 rsp_opcode,
+			     struct sk_buff *skb, struct nci_driver_ops *ops,
+			     size_t n_ops)
 {
-	struct nci_prop_ops *prop_op;
+	struct nci_driver_ops *op;
 
-	prop_op = prop_cmd_lookup(ndev, rsp_opcode);
-	if (!prop_op || !prop_op->rsp)
+	op = ops_cmd_lookup(ops, n_ops, rsp_opcode);
+	if (!op || !op->rsp)
 		return -ENOTSUPP;
 
-	return prop_op->rsp(ndev, skb);
+	return op->rsp(ndev, skb);
 }
 
-int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 ntf_opcode,
-			struct sk_buff *skb)
+static int nci_op_ntf_packet(struct nci_dev *ndev, __u16 ntf_opcode,
+			     struct sk_buff *skb, struct nci_driver_ops *ops,
+			     size_t n_ops)
 {
-	struct nci_prop_ops *prop_op;
+	struct nci_driver_ops *op;
 
-	prop_op = prop_cmd_lookup(ndev, ntf_opcode);
-	if (!prop_op || !prop_op->ntf)
+	op = ops_cmd_lookup(ops, n_ops, ntf_opcode);
+	if (!op || !op->ntf)
 		return -ENOTSUPP;
 
-	return prop_op->ntf(ndev, skb);
+	return op->ntf(ndev, skb);
+}
+
+int nci_prop_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb)
+{
+	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->prop_ops,
+				 ndev->ops->n_prop_ops);
+}
+
+int nci_prop_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb)
+{
+	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->prop_ops,
+				 ndev->ops->n_prop_ops);
+}
+
+int nci_core_rsp_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb)
+{
+	return nci_op_rsp_packet(ndev, opcode, skb, ndev->ops->core_ops,
+				  ndev->ops->n_core_ops);
+}
+
+int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
+			struct sk_buff *skb)
+{
+	return nci_op_ntf_packet(ndev, opcode, skb, ndev->ops->core_ops,
+				 ndev->ops->n_core_ops);
 }
 
 /* ---- NCI TX Data worker thread ---- */
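
ops_cmd_lookup() above factors the opcode search out of the
proprietary-ops path so the same walk now serves the new core_ops table
as well. A self-contained model of that table-plus-dispatch refactor
(opcodes and handlers are invented for the demo):

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct driver_op {
	unsigned short opcode;
	int (*rsp)(void);
};

static int core_init_rsp(void) { return 0; }

static const struct driver_op core_ops[] = {
	{ 0x2001, core_init_rsp },
};

static const struct driver_op *lookup(const struct driver_op *ops,
				      size_t n, unsigned short opcode)
{
	for (size_t i = 0; i < n; i++)
		if (ops[i].opcode == opcode)
			return &ops[i];
	return NULL;
}

static int dispatch(const struct driver_op *ops, size_t n,
		    unsigned short opcode)
{
	const struct driver_op *op = lookup(ops, n, opcode);

	return (op && op->rsp) ? op->rsp() : -ENOTSUP;
}

int main(void)
{
	printf("known: %d, unknown: %d\n",
	       dispatch(core_ops, 1, 0x2001), dispatch(core_ops, 1, 0x2abc));
	return 0;
}
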
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 566466d..dbd2425 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -90,6 +90,18 @@
 	nci_pbf_set((__u8 *)hdr, pbf);
 }
 
+int nci_conn_max_data_pkt_payload_size(struct nci_dev *ndev, __u8 conn_id)
+{
+	struct nci_conn_info *conn_info;
+
+	conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+	if (!conn_info)
+		return -EPROTO;
+
+	return conn_info->max_pkt_payload_len;
+}
+EXPORT_SYMBOL(nci_conn_max_data_pkt_payload_size);
+
 static int nci_queue_tx_data_frags(struct nci_dev *ndev,
 				   __u8 conn_id,
 				   struct sk_buff *skb) {
@@ -203,6 +215,7 @@
 exit:
 	return rc;
 }
+EXPORT_SYMBOL(nci_send_data);
 
 /* ----------------- NCI RX Data ----------------- */
 
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index 609f922..2aedac1 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -70,6 +70,7 @@
 #define NCI_HCI_ANY_SET_PARAMETER  0x01
 #define NCI_HCI_ANY_GET_PARAMETER  0x02
 #define NCI_HCI_ANY_CLOSE_PIPE     0x04
+#define NCI_HCI_ADM_CLEAR_ALL_PIPE 0x14
 
 #define NCI_HFP_NO_CHAINING        0x80
 
@@ -78,6 +79,8 @@
 #define NCI_EVT_HOT_PLUG           0x03
 
 #define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY       0x01
+#define NCI_HCI_ADM_CREATE_PIPE			0x10
+#define NCI_HCI_ADM_DELETE_PIPE			0x11
 
 /* HCP headers */
 #define NCI_HCI_HCP_PACKET_HEADER_LEN      1
@@ -101,6 +104,20 @@
 #define NCI_HCP_MSG_GET_CMD(header)  (header & 0x3f)
 #define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
 
+static int nci_hci_result_to_errno(u8 result)
+{
+	switch (result) {
+	case NCI_HCI_ANY_OK:
+		return 0;
+	case NCI_HCI_ANY_E_REG_PAR_UNKNOWN:
+		return -EOPNOTSUPP;
+	case NCI_HCI_ANY_E_TIMEOUT:
+		return -ETIME;
+	default:
+		return -1;
+	}
+}
+
 /* HCI core */
 static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
 {
@@ -146,18 +163,18 @@
 	if (!conn_info)
 		return -EPROTO;
 
-	skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
+	i = 0;
+	skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
 			    NCI_DATA_HDR_SIZE, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
-	skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
+	skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
 	*skb_push(skb, 1) = data_type;
 
-	i = 0;
-	len = conn_info->max_pkt_payload_len;
-
 	do {
+		len = conn_info->max_pkt_payload_len;
+
 		/* If last packet add NCI_HFP_NO_CHAINING */
 		if (i + conn_info->max_pkt_payload_len -
 		    (skb->len + 1) >= data_len) {
@@ -177,9 +194,15 @@
 			return r;
 
 		i += len;
+
 		if (i < data_len) {
-			skb_trim(skb, 0);
-			skb_pull(skb, len);
+			skb = nci_skb_alloc(ndev,
+					    conn_info->max_pkt_payload_len +
+					    NCI_DATA_HDR_SIZE, GFP_KERNEL);
+			if (!skb)
+				return -ENOMEM;
+
+			skb_reserve(skb, NCI_DATA_HDR_SIZE + 1);
 		}
 	} while (i < data_len);
 
@@ -212,7 +235,8 @@
 		     const u8 *param, size_t param_len,
 		     struct sk_buff **skb)
 {
-	struct nci_conn_info    *conn_info;
+	struct nci_hcp_message *message;
+	struct nci_conn_info   *conn_info;
 	struct nci_data data;
 	int r;
 	u8 pipe = ndev->hci_dev->gate2pipe[gate];
@@ -232,14 +256,34 @@
 
 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+	if (r == NCI_STATUS_OK) {
+		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+		r = nci_hci_result_to_errno(
+			NCI_HCP_MSG_GET_CMD(message->header));
+		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
 
-	if (r == NCI_STATUS_OK && skb)
-		*skb = conn_info->rx_skb;
+		if (!r && skb)
+			*skb = conn_info->rx_skb;
+	}
 
 	return r;
 }
 EXPORT_SYMBOL(nci_hci_send_cmd);
 
+int nci_hci_clear_all_pipes(struct nci_dev *ndev)
+{
+	int r;
+
+	r = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE,
+			     NCI_HCI_ADM_CLEAR_ALL_PIPE, NULL, 0, NULL);
+	if (r < 0)
+		return r;
+
+	nci_hci_reset_pipes(ndev->hci_dev);
+	return r;
+}
+EXPORT_SYMBOL(nci_hci_clear_all_pipes);
+
 static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
 				   u8 event, struct sk_buff *skb)
 {
@@ -328,9 +372,6 @@
 	struct nci_conn_info    *conn_info;
 	u8 status = result;
 
-	if (result != NCI_HCI_ANY_OK)
-		goto exit;
-
 	conn_info = ndev->hci_dev->conn_info;
 	if (!conn_info) {
 		status = NCI_STATUS_REJECTED;
@@ -340,7 +381,7 @@
 	conn_info->rx_skb = skb;
 
 exit:
-	nci_req_complete(ndev, status);
+	nci_req_complete(ndev, NCI_STATUS_OK);
 }
 
 /* Receive hcp message for pipe, with type and cmd.
@@ -366,7 +407,7 @@
 		break;
 	}
 
-	nci_req_complete(ndev, 0);
+	nci_req_complete(ndev, NCI_STATUS_OK);
 }
 
 static void nci_hci_msg_rx_work(struct work_struct *work)
@@ -378,7 +419,7 @@
 	u8 pipe, type, instruction;
 
 	while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
-		pipe = skb->data[0];
+		pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
 		skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
 		message = (struct nci_hcp_message *)skb->data;
 		type = NCI_HCP_MSG_GET_TYPE(message->header);
@@ -395,7 +436,7 @@
 {
 	struct nci_dev *ndev = (struct nci_dev *)context;
 	struct nci_hcp_packet *packet;
-	u8 pipe, type, instruction;
+	u8 pipe, type;
 	struct sk_buff *hcp_skb;
 	struct sk_buff *frag_skb;
 	int msg_len;
@@ -415,7 +456,7 @@
 
 	/* it's the last fragment. Does it need re-aggregation? */
 	if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
-		pipe = packet->header & NCI_HCI_FRAGMENT;
+		pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
 		skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
 
 		msg_len = 0;
@@ -434,7 +475,7 @@
 		*skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
 
 		skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
-		       msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
+			msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
 			memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
 			       NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
 		}
@@ -452,11 +493,10 @@
 	packet = (struct nci_hcp_packet *)hcp_skb->data;
 	type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
 	if (type == NCI_HCI_HCP_RESPONSE) {
-		pipe = packet->header;
-		instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
-		skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
-			 NCI_HCI_HCP_MESSAGE_HEADER_LEN);
-		nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
+		pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
+		skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
+		nci_hci_hcp_message_rx(ndev, pipe, type,
+				       NCI_STATUS_OK, hcp_skb);
 	} else {
 		skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
 		schedule_work(&ndev->hci_dev->msg_rx_work);
@@ -485,9 +525,47 @@
 }
 EXPORT_SYMBOL(nci_hci_open_pipe);
 
+static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host,
+			      u8 dest_gate, int *result)
+{
+	u8 pipe;
+	struct sk_buff *skb;
+	struct nci_hci_create_pipe_params params;
+	struct nci_hci_create_pipe_resp *resp;
+
+	pr_debug("gate=%d\n", dest_gate);
+
+	params.src_gate = NCI_HCI_ADMIN_GATE;
+	params.dest_host = dest_host;
+	params.dest_gate = dest_gate;
+
+	*result = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE,
+				   NCI_HCI_ADM_CREATE_PIPE,
+				   (u8 *)&params, sizeof(params), &skb);
+	if (*result < 0)
+		return NCI_HCI_INVALID_PIPE;
+
+	resp = (struct nci_hci_create_pipe_resp *)skb->data;
+	pipe = resp->pipe;
+	kfree_skb(skb);
+
+	pr_debug("pipe created=%d\n", pipe);
+
+	return pipe;
+}
+
+static int nci_hci_delete_pipe(struct nci_dev *ndev, u8 pipe)
+{
+	pr_debug("\n");
+
+	return nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE,
+				NCI_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
+}
+
 int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
 		      const u8 *param, size_t param_len)
 {
+	struct nci_hcp_message *message;
 	struct nci_conn_info *conn_info;
 	struct nci_data data;
 	int r;
@@ -520,6 +598,12 @@
 	r = nci_request(ndev, nci_hci_send_data_req,
 			(unsigned long)&data,
 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+	if (r == NCI_STATUS_OK) {
+		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+		r = nci_hci_result_to_errno(
+			NCI_HCP_MSG_GET_CMD(message->header));
+		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+	}
 
 	kfree(tmp);
 	return r;
@@ -529,6 +613,7 @@
 int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
 		      struct sk_buff **skb)
 {
+	struct nci_hcp_message *message;
 	struct nci_conn_info    *conn_info;
 	struct nci_data data;
 	int r;
@@ -553,8 +638,15 @@
 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
 
-	if (r == NCI_STATUS_OK)
-		*skb = conn_info->rx_skb;
+	if (r == NCI_STATUS_OK) {
+		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
+		r = nci_hci_result_to_errno(
+			NCI_HCP_MSG_GET_CMD(message->header));
+		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+
+		if (!r && skb)
+			*skb = conn_info->rx_skb;
+	}
 
 	return r;
 }
@@ -563,6 +655,7 @@
 int nci_hci_connect_gate(struct nci_dev *ndev,
 			 u8 dest_host, u8 dest_gate, u8 pipe)
 {
+	bool pipe_created = false;
 	int r;
 
 	if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE)
@@ -581,12 +674,26 @@
 	case NCI_HCI_ADMIN_GATE:
 		pipe = NCI_HCI_ADMIN_PIPE;
 	break;
+	default:
+		pipe = nci_hci_create_pipe(ndev, dest_host, dest_gate, &r);
+		if (pipe == NCI_HCI_INVALID_PIPE)
+			return r;
+		pipe_created = true;
+		break;
 	}
 
 open_pipe:
 	r = nci_hci_open_pipe(ndev, pipe);
-	if (r < 0)
+	if (r < 0) {
+		if (pipe_created) {
+			if (nci_hci_delete_pipe(ndev, pipe) < 0) {
+				/* TODO: Cannot clean by deleting pipe...
+				 * -> inconsistent state
+				 */
+			}
+		}
 		return r;
+	}
 
 	ndev->hci_dev->pipes[pipe].gate = dest_gate;
 	ndev->hci_dev->pipes[pipe].host = dest_host;
@@ -653,6 +760,10 @@
 		/* Restore gate<->pipe table from some proprietary location. */
 		r = ndev->ops->hci_load_session(ndev);
 	} else {
+		r = nci_hci_clear_all_pipes(ndev);
+		if (r < 0)
+			goto exit;
+
 		r = nci_hci_dev_connect_gates(ndev,
 					      ndev->hci_dev->init_data.gate_count,
 					      ndev->hci_dev->init_data.gates);
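
nci_hci_result_to_errno() above lets the HCI send/get/set paths turn an
HCP result byte into a negative errno, so every caller keeps the usual
"r < 0" convention. A standalone version of that mapping idea; the
result-code values below are placeholders rather than the NCI-defined
ones:

#include <stdio.h>
#include <errno.h>

#define ANY_OK            0x00
#define ANY_E_REG_UNKNOWN 0x09
#define ANY_E_TIMEOUT     0x0a

static int result_to_errno(unsigned char result)
{
	switch (result) {
	case ANY_OK:            return 0;
	case ANY_E_REG_UNKNOWN: return -EOPNOTSUPP;
	case ANY_E_TIMEOUT:     return -ETIME;
	default:                return -1;	/* unmapped failure */
	}
}

int main(void)
{
	printf("%d %d %d\n", result_to_errno(ANY_OK),
	       result_to_errno(ANY_E_TIMEOUT), result_to_errno(0x42));
	return 0;
}
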
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 5d1c2e3..2ada2b3 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -759,7 +759,7 @@
 	skb_pull(skb, NCI_CTRL_HDR_SIZE);
 
 	if (nci_opcode_gid(ntf_opcode) == NCI_GID_PROPRIETARY) {
-		if (nci_prop_ntf_packet(ndev, ntf_opcode, skb)) {
+		if (nci_prop_ntf_packet(ndev, ntf_opcode, skb) == -ENOTSUPP) {
 			pr_err("unsupported ntf opcode 0x%x\n",
 			       ntf_opcode);
 		}
@@ -805,6 +805,7 @@
 		break;
 	}
 
+	nci_core_ntf_packet(ndev, ntf_opcode, skb);
 end:
 	kfree_skb(skb);
 }
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
index 408bd8f..9b6eb91 100644
--- a/net/nfc/nci/rsp.c
+++ b/net/nfc/nci/rsp.c
@@ -355,6 +355,7 @@
 		break;
 	}
 
+	nci_core_rsp_packet(ndev, rsp_opcode, skb);
 end:
 	kfree_skb(skb);
 
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index ec250e7..d904cd2 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -18,6 +18,8 @@
 
 #define pr_fmt(fmt) "nci_spi: %s: " fmt, __func__
 
+#include <linux/module.h>
+
 #include <linux/export.h>
 #include <linux/spi/spi.h>
 #include <linux/crc-ccitt.h>
@@ -56,6 +58,7 @@
 	}
 	t.cs_change = cs_change;
 	t.delay_usecs = nspi->xfer_udelay;
+	t.speed_hz = nspi->xfer_speed_hz;
 
 	spi_message_init(&m);
 	spi_message_add_tail(&t, &m);
@@ -142,7 +145,8 @@
 
 	nspi->acknowledge_mode = acknowledge_mode;
 	nspi->xfer_udelay = delay;
-
+	/* Use controller max SPI speed by default */
+	nspi->xfer_speed_hz = 0;
 	nspi->spi = spi;
 	nspi->ndev = ndev;
 	init_completion(&nspi->req_completion);
@@ -195,12 +199,14 @@
 	tx.tx_buf = req;
 	tx.len = 2;
 	tx.cs_change = 0;
+	tx.speed_hz = nspi->xfer_speed_hz;
 	spi_message_add_tail(&tx, &m);
 
 	memset(&rx, 0, sizeof(struct spi_transfer));
 	rx.rx_buf = resp_hdr;
 	rx.len = 2;
 	rx.cs_change = 1;
+	rx.speed_hz = nspi->xfer_speed_hz;
 	spi_message_add_tail(&rx, &m);
 
 	ret = spi_sync(nspi->spi, &m);
@@ -224,6 +230,7 @@
 	rx.len = rx_len;
 	rx.cs_change = 0;
 	rx.delay_usecs = nspi->xfer_udelay;
+	rx.speed_hz = nspi->xfer_speed_hz;
 	spi_message_add_tail(&rx, &m);
 
 	ret = spi_sync(nspi->spi, &m);
@@ -320,3 +327,5 @@
 	return skb;
 }
 EXPORT_SYMBOL_GPL(nci_spi_read);
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 853172c..f58c1fb 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -885,7 +885,7 @@
 	target_idx = nla_get_u32(info->attrs[NFC_ATTR_TARGET_INDEX]);
 	protocol = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
 
-	nfc_deactivate_target(dev, target_idx);
+	nfc_deactivate_target(dev, target_idx, NFC_TARGET_MODE_SLEEP);
 	rc = nfc_activate_target(dev, target_idx, protocol);
 
 	nfc_put_device(dev);
@@ -1109,10 +1109,8 @@
 	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
 
 	dev = nfc_get_device(idx);
-	if (!dev) {
-		rc = -ENODEV;
-		goto exit;
-	}
+	if (!dev)
+		return -ENODEV;
 
 	device_lock(&dev->dev);
 
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 5c93e84..c20b784 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -25,6 +25,9 @@
 #include <net/nfc/nfc.h>
 #include <net/sock.h>
 
+#define NFC_TARGET_MODE_IDLE 0
+#define NFC_TARGET_MODE_SLEEP 1
+
 struct nfc_protocol {
 	int id;
 	struct proto *proto;
@@ -147,7 +150,7 @@
 
 int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
 
-int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
+int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx, u8 mode);
 
 int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
 		      data_exchange_cb_t cb, void *cb_context);
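
With NFC_TARGET_MODE_IDLE/SLEEP defined above, callers such as the
netlink re-activate path can ask for a sleeping target instead of a full
deactivation, and nci_deactivate_target() maps the generic mode onto an
NCI deactivate type. A compact model of that mapping (the constant
values mirror the 0/1 pattern used here but are otherwise illustrative):

#include <stdio.h>

#define NFC_TARGET_MODE_IDLE  0
#define NFC_TARGET_MODE_SLEEP 1

#define NCI_DEACT_IDLE  0x00
#define NCI_DEACT_SLEEP 0x01

static unsigned char nfc_mode_to_nci(unsigned char mode)
{
	switch (mode) {
	case NFC_TARGET_MODE_SLEEP:
		return NCI_DEACT_SLEEP;
	default:			/* anything else falls back to idle */
		return NCI_DEACT_IDLE;
	}
}

int main(void)
{
	printf("sleep -> 0x%02x, idle -> 0x%02x\n",
	       nfc_mode_to_nci(NFC_TARGET_MODE_SLEEP),
	       nfc_mode_to_nci(NFC_TARGET_MODE_IDLE));
	return 0;
}
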
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index e9a9148..e386e6c 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -321,7 +321,8 @@
 
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		nfc_deactivate_target(nfc_rawsock(sk)->dev,
-				      nfc_rawsock(sk)->target_idx);
+				      nfc_rawsock(sk)->target_idx,
+				      NFC_TARGET_MODE_IDLE);
 		nfc_put_device(nfc_rawsock(sk)->dev);
 	}
 
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 315f533..221fa8b 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -620,7 +620,7 @@
 	return 0;
 }
 
-static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
 	struct vport *vport = data->vport;
@@ -679,12 +679,12 @@
 	skb_pull(skb, hlen);
 }
 
-static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
-			 __be16 ethertype)
+static void ovs_fragment(struct net *net, struct vport *vport,
+			 struct sk_buff *skb, u16 mru, __be16 ethertype)
 {
 	if (skb_network_offset(skb) > MAX_L2_LEN) {
 		OVS_NLERR(1, "L2 header too long to fragment");
-		return;
+		goto err;
 	}
 
 	if (ethertype == htons(ETH_P_IP)) {
@@ -700,7 +700,7 @@
 		skb_dst_set_noref(skb, &ovs_dst);
 		IPCB(skb)->frag_max_size = mru;
 
-		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
 		refdst_drop(orig_dst);
 	} else if (ethertype == htons(ETH_P_IPV6)) {
 		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
@@ -708,8 +708,7 @@
 		struct rt6_info ovs_rt;
 
 		if (!v6ops) {
-			kfree_skb(skb);
-			return;
+			goto err;
 		}
 
 		prepare_frag(vport, skb);
@@ -722,14 +721,18 @@
 		skb_dst_set_noref(skb, &ovs_rt.dst);
 		IP6CB(skb)->frag_max_size = mru;
 
-		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
 		refdst_drop(orig_dst);
 	} else {
 		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
 			  ovs_vport_name(vport), ntohs(ethertype), mru,
 			  vport->dev->mtu);
-		kfree_skb(skb);
+		goto err;
 	}
+
+	return;
+err:
+	kfree_skb(skb);
 }
 
 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
@@ -743,6 +746,7 @@
 		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
 			ovs_vport_send(vport, skb);
 		} else if (mru <= vport->dev->mtu) {
+			struct net *net = read_pnet(&dp->net);
 			__be16 ethertype = key->eth.type;
 
 			if (!is_flow_key_valid(key)) {
@@ -752,7 +756,7 @@
 					ethertype = vlan_get_protocol(skb);
 			}
 
-			ovs_fragment(vport, skb, mru, ethertype);
+			ovs_fragment(net, vport, skb, mru, ethertype);
 		} else {
 			kfree_skb(skb);
 		}
@@ -765,7 +769,6 @@
 			    struct sw_flow_key *key, const struct nlattr *attr,
 			    const struct nlattr *actions, int actions_len)
 {
-	struct ip_tunnel_info info;
 	struct dp_upcall_info upcall;
 	const struct nlattr *a;
 	int rem;
@@ -793,11 +796,9 @@
 			if (vport) {
 				int err;
 
-				upcall.egress_tun_info = &info;
-				err = ovs_vport_get_egress_tun_info(vport, skb,
-								    &upcall);
-				if (err)
-					upcall.egress_tun_info = NULL;
+				err = dev_fill_metadata_dst(vport->dev, skb);
+				if (!err)
+					upcall.egress_tun_info = skb_tunnel_info(skb);
 			}
 
 			break;
@@ -968,7 +969,7 @@
 	case OVS_KEY_ATTR_CT_STATE:
 	case OVS_KEY_ATTR_CT_ZONE:
 	case OVS_KEY_ATTR_CT_MARK:
-	case OVS_KEY_ATTR_CT_LABEL:
+	case OVS_KEY_ATTR_CT_LABELS:
 		err = -EINVAL;
 		break;
 	}
@@ -1099,6 +1100,12 @@
 			break;
 
 		case OVS_ACTION_ATTR_CT:
+			if (!is_flow_key_valid(key)) {
+				err = ovs_flow_key_update(skb, key);
+				if (err)
+					return err;
+			}
+
 			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
 					     nla_data(a));
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index eb759e3..bd165ee 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -37,9 +37,9 @@
 };
 
 /* Metadata label for masked write to conntrack label. */
-struct md_label {
-	struct ovs_key_ct_label value;
-	struct ovs_key_ct_label mask;
+struct md_labels {
+	struct ovs_key_ct_labels value;
+	struct ovs_key_ct_labels mask;
 };
 
 /* Conntrack action context for execution. */
@@ -47,10 +47,10 @@
 	struct nf_conntrack_helper *helper;
 	struct nf_conntrack_zone zone;
 	struct nf_conn *ct;
-	u32 flags;
+	u8 commit : 1;
 	u16 family;
 	struct md_mark mark;
-	struct md_label label;
+	struct md_labels labels;
 };
 
 static u16 key_to_nfproto(const struct sw_flow_key *key)
@@ -109,21 +109,21 @@
 #endif
 }
 
-static void ovs_ct_get_label(const struct nf_conn *ct,
-			     struct ovs_key_ct_label *label)
+static void ovs_ct_get_labels(const struct nf_conn *ct,
+			      struct ovs_key_ct_labels *labels)
 {
 	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
 
 	if (cl) {
 		size_t len = cl->words * sizeof(long);
 
-		if (len > OVS_CT_LABEL_LEN)
-			len = OVS_CT_LABEL_LEN;
-		else if (len < OVS_CT_LABEL_LEN)
-			memset(label, 0, OVS_CT_LABEL_LEN);
-		memcpy(label, cl->bits, len);
+		if (len > OVS_CT_LABELS_LEN)
+			len = OVS_CT_LABELS_LEN;
+		else if (len < OVS_CT_LABELS_LEN)
+			memset(labels, 0, OVS_CT_LABELS_LEN);
+		memcpy(labels, cl->bits, len);
 	} else {
-		memset(label, 0, OVS_CT_LABEL_LEN);
+		memset(labels, 0, OVS_CT_LABELS_LEN);
 	}
 }
 
@@ -134,7 +134,7 @@
 	key->ct.state = state;
 	key->ct.zone = zone->id;
 	key->ct.mark = ovs_ct_get_mark(ct);
-	ovs_ct_get_label(ct, &key->ct.label);
+	ovs_ct_get_labels(ct, &key->ct.labels);
 }
 
 /* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
@@ -151,6 +151,8 @@
 	ct = nf_ct_get(skb, &ctinfo);
 	if (ct) {
 		state = ovs_ct_get_state(ctinfo);
+		if (!nf_ct_is_confirmed(ct))
+			state |= OVS_CS_F_NEW;
 		if (ct->master)
 			state |= OVS_CS_F_RELATED;
 		zone = nf_ct_zone(ct);
@@ -167,7 +169,7 @@
 
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
 {
-	if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
 		return -EMSGSIZE;
 
 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
@@ -179,8 +181,8 @@
 		return -EMSGSIZE;
 
 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-	    nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
-		    &key->ct.label))
+	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
+		    &key->ct.labels))
 		return -EMSGSIZE;
 
 	return 0;
@@ -213,18 +215,15 @@
 #endif
 }
 
-static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
-			    const struct ovs_key_ct_label *label,
-			    const struct ovs_key_ct_label *mask)
+static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
+			     const struct ovs_key_ct_labels *labels,
+			     const struct ovs_key_ct_labels *mask)
 {
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn_labels *cl;
 	struct nf_conn *ct;
 	int err;
 
-	if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
-		return -ENOTSUPP;
-
 	/* The connection could be invalid, in which case set_label is no-op.*/
 	ct = nf_ct_get(skb, &ctinfo);
 	if (!ct)
@@ -235,15 +234,15 @@
 		nf_ct_labels_ext_add(ct);
 		cl = nf_ct_labels_find(ct);
 	}
-	if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+	if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
 		return -ENOSPC;
 
-	err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
-				    OVS_CT_LABEL_LEN / sizeof(u32));
+	err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
+				    OVS_CT_LABELS_LEN / sizeof(u32));
 	if (err)
 		return err;
 
-	ovs_ct_get_label(ct, &key->ct.label);
+	ovs_ct_get_labels(ct, &key->ct.labels);
 	return 0;
 }
 
@@ -304,7 +303,7 @@
 		int err;
 
 		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-		err = ip_defrag(skb, user);
+		err = ip_defrag(net, skb, user);
 		if (err)
 			return err;
 
@@ -315,7 +314,7 @@
 		struct sk_buff *reasm;
 
 		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
-		reasm = nf_ct_frag6_gather(skb, user);
+		reasm = nf_ct_frag6_gather(net, skb, user);
 		if (!reasm)
 			return -EINPROGRESS;
 
@@ -377,7 +376,7 @@
 	return true;
 }
 
-static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
 			   const struct ovs_conntrack_info *info,
 			   struct sk_buff *skb)
 {
@@ -408,6 +407,8 @@
 		}
 	}
 
+	ovs_ct_update_key(skb, key, true);
+
 	return 0;
 }
 
@@ -430,8 +431,6 @@
 		err = __ovs_ct_lookup(net, key, info, skb);
 		if (err)
 			return err;
-
-		ovs_ct_update_key(skb, key, true);
 	}
 
 	return 0;
@@ -460,17 +459,15 @@
 	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
 		return -EINVAL;
 
-	ovs_ct_update_key(skb, key, true);
-
 	return 0;
 }
 
-static bool label_nonzero(const struct ovs_key_ct_label *label)
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
 {
 	size_t i;
 
-	for (i = 0; i < sizeof(*label); i++)
-		if (label->ct_label[i])
+	for (i = 0; i < sizeof(*labels); i++)
+		if (labels->ct_labels[i])
 			return true;
 
 	return false;
@@ -493,7 +490,7 @@
 			return err;
 	}
 
-	if (info->flags & OVS_CT_F_COMMIT)
+	if (info->commit)
 		err = ovs_ct_commit(net, key, info, skb);
 	else
 		err = ovs_ct_lookup(net, key, info, skb);
@@ -506,9 +503,9 @@
 		if (err)
 			goto err;
 	}
-	if (label_nonzero(&info->label.mask))
-		err = ovs_ct_set_label(skb, key, &info->label.value,
-				       &info->label.mask);
+	if (labels_nonzero(&info->labels.mask))
+		err = ovs_ct_set_labels(skb, key, &info->labels.value,
+					&info->labels.mask);
 err:
 	skb_push(skb, nh_ofs);
 	return err;
@@ -539,14 +536,13 @@
 }
 
 static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
-	[OVS_CT_ATTR_FLAGS]	= { .minlen = sizeof(u32),
-				    .maxlen = sizeof(u32) },
+	[OVS_CT_ATTR_COMMIT]	= { .minlen = 0, .maxlen = 0 },
 	[OVS_CT_ATTR_ZONE]	= { .minlen = sizeof(u16),
 				    .maxlen = sizeof(u16) },
 	[OVS_CT_ATTR_MARK]	= { .minlen = sizeof(struct md_mark),
 				    .maxlen = sizeof(struct md_mark) },
-	[OVS_CT_ATTR_LABEL]	= { .minlen = sizeof(struct md_label),
-				    .maxlen = sizeof(struct md_label) },
+	[OVS_CT_ATTR_LABELS]	= { .minlen = sizeof(struct md_labels),
+				    .maxlen = sizeof(struct md_labels) },
 	[OVS_CT_ATTR_HELPER]	= { .minlen = 1,
 				    .maxlen = NF_CT_HELPER_NAME_LEN }
 };
@@ -576,8 +572,8 @@
 		}
 
 		switch (type) {
-		case OVS_CT_ATTR_FLAGS:
-			info->flags = nla_get_u32(a);
+		case OVS_CT_ATTR_COMMIT:
+			info->commit = true;
 			break;
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 		case OVS_CT_ATTR_ZONE:
@@ -588,15 +584,23 @@
 		case OVS_CT_ATTR_MARK: {
 			struct md_mark *mark = nla_data(a);
 
+			if (!mark->mask) {
+				OVS_NLERR(log, "ct_mark mask cannot be 0");
+				return -EINVAL;
+			}
 			info->mark = *mark;
 			break;
 		}
 #endif
 #ifdef CONFIG_NF_CONNTRACK_LABELS
-		case OVS_CT_ATTR_LABEL: {
-			struct md_label *label = nla_data(a);
+		case OVS_CT_ATTR_LABELS: {
+			struct md_labels *labels = nla_data(a);
 
-			info->label = *label;
+			if (!labels_nonzero(&labels->mask)) {
+				OVS_NLERR(log, "ct_labels mask cannot be 0");
+				return -EINVAL;
+			}
+			info->labels = *labels;
 			break;
 		}
 #endif
@@ -633,7 +637,7 @@
 	    attr == OVS_KEY_ATTR_CT_MARK)
 		return true;
 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-	    attr == OVS_KEY_ATTR_CT_LABEL) {
+	    attr == OVS_KEY_ATTR_CT_LABELS) {
 		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
 		return ovs_net->xt_label;
@@ -701,18 +705,19 @@
 	if (!start)
 		return -EMSGSIZE;
 
-	if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+	if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
 		return -EMSGSIZE;
 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
 	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
 		return -EMSGSIZE;
-	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
 	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
 		    &ct_info->mark))
 		return -EMSGSIZE;
 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-	    nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
-		    &ct_info->label))
+	    labels_nonzero(&ct_info->labels.mask) &&
+	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
+		    &ct_info->labels))
 		return -EMSGSIZE;
 	if (ct_info->helper) {
 		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
@@ -737,7 +742,7 @@
 
 void ovs_ct_init(struct net *net)
 {
-	unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
 	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
 
 	if (nf_connlabels_get(net, n_bits)) {
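The nf_connlabels_replace() call in ovs_ct_set_labels() above only rewrites the label bits selected by the mask, which is also why the parser now rejects an all-zero ct_labels mask as a no-op. A userspace sketch of the masked-replace semantics, assuming the usual new = (old & ~mask) | (value & mask) per 32-bit word:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LABELS_WORDS 4  /* 128-bit label: OVS_CT_LABELS_LEN / sizeof(u32) */

    static void labels_replace(uint32_t *old, const uint32_t *value,
                               const uint32_t *mask)
    {
            int i;

            /* Bits set in mask come from value, all other bits are kept. */
            for (i = 0; i < LABELS_WORDS; i++)
                    old[i] = (old[i] & ~mask[i]) | (value[i] & mask[i]);
    }

    int main(void)
    {
            uint32_t labels[LABELS_WORDS] = { 0xffffffff, 0, 0, 0 };
            uint32_t value[LABELS_WORDS]  = { 0x000000aa, 0, 0, 0 };
            uint32_t mask[LABELS_WORDS]   = { 0x000000ff, 0, 0, 0 };

            labels_replace(labels, value, mask);
            assert(labels[0] == 0xffffffaa);  /* low byte replaced, rest kept */
            printf("labels[0] = 0x%08x\n", labels[0]);
            return 0;
    }
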
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
index 43f5dd7..82e0dfc 100644
--- a/net/openvswitch/conntrack.h
+++ b/net/openvswitch/conntrack.h
@@ -34,6 +34,10 @@
 void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
 int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
+
+#define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
+			   OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | \
+			   OVS_CS_F_INVALID | OVS_CS_F_TRACKED)
 #else
 #include <linux/errno.h>
 
@@ -72,7 +76,7 @@
 	key->ct.state = 0;
 	key->ct.zone = 0;
 	key->ct.mark = 0;
-	memset(&key->ct.label, 0, sizeof(key->ct.label));
+	memset(&key->ct.labels, 0, sizeof(key->ct.labels));
 }
 
 static inline int ovs_ct_put_key(const struct sw_flow_key *key,
@@ -82,5 +86,7 @@
 }
 
 static inline void ovs_ct_free_action(const struct nlattr *a) { }
+
+#define CT_SUPPORTED_MASK 0
 #endif /* CONFIG_NF_CONNTRACK */
 #endif /* ovs_conntrack.h */
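CT_SUPPORTED_MASK gives the netlink parser one place to reject ct_state bits the datapath does not implement (see the ct_state check added to metadata_from_nlattrs() further down). A small sketch of that validate-against-a-mask pattern, with illustrative flag values:

    #include <stdint.h>
    #include <stdio.h>

    #define CS_F_NEW          0x01  /* values mirror the OVS_CS_F_* uapi flags */
    #define CS_F_ESTABLISHED  0x02
    #define CS_F_TRACKED      0x20
    #define CS_SUPPORTED_MASK (CS_F_NEW | CS_F_ESTABLISHED | CS_F_TRACKED)

    /* Refuse any request carrying bits outside the supported mask, so
     * old kernels fail loudly instead of silently ignoring new flags.
     */
    static int validate_ct_state(uint32_t state)
    {
            if (state & ~CS_SUPPORTED_MASK) {
                    fprintf(stderr, "ct_state flags %08x unsupported\n",
                            state & ~CS_SUPPORTED_MASK);
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", validate_ct_state(CS_F_NEW | CS_F_TRACKED)); /* 0 */
            printf("%d\n", validate_ct_state(0x80000000));              /* -1 */
            return 0;
    }
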
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a758280..5633172 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -489,9 +489,8 @@
 
 	if (upcall_info->egress_tun_info) {
 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
-		err = ovs_nla_put_egress_tunnel_key(user_skb,
-						    upcall_info->egress_tun_info,
-						    upcall_info->egress_tun_opts);
+		err = ovs_nla_put_tunnel_info(user_skb,
+					      upcall_info->egress_tun_info);
 		BUG_ON(err);
 		nla_nest_end(user_skb, nla);
 	}
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index f88038a..67bdecd 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -117,7 +117,6 @@
  */
 struct dp_upcall_info {
 	struct ip_tunnel_info *egress_tun_info;
-	const void *egress_tun_opts;
 	const struct nlattr *userdata;
 	const struct nlattr *actions;
 	int actions_len;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index c8db44a..0ea128e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -698,8 +698,7 @@
 {
 	/* Extract metadata from packet. */
 	if (tun_info) {
-		if (ip_tunnel_info_af(tun_info) != AF_INET)
-			return -EINVAL;
+		key->tun_proto = ip_tunnel_info_af(tun_info);
 		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
 		if (tun_info->options_len) {
@@ -714,6 +713,7 @@
 			key->tun_opts_len = 0;
 		}
 	} else  {
+		key->tun_proto = 0;
 		key->tun_opts_len = 0;
 		memset(&key->tun_key, 0, sizeof(key->tun_key));
 	}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index fe527d2..1d055c5 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -63,6 +63,7 @@
 		u32	skb_mark;	/* SKB mark. */
 		u16	in_port;	/* Input switch port (or DP_MAX_PORTS). */
 	} __packed phy; /* Safe when right after 'tun_key'. */
+	u8 tun_proto;			/* Protocol of encapsulating tunnel. */
 	u32 ovs_flow_hash;		/* Datapath computed hash value.  */
 	u32 recirc_id;			/* Recirculation ID.  */
 	struct {
@@ -116,7 +117,7 @@
 		u16 zone;
 		u32 mark;
 		u8 state;
-		struct ovs_key_ct_label label;
+		struct ovs_key_ct_labels labels;
 	} ct;
 
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
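The __aligned(BITS_PER_LONG/8) annotation above is what lets the flow table compare keys a long at a time instead of byte by byte (see flow_key_start() in flow_table.c below, which now keys off tun_proto). A simplified stand-in, not the real sw_flow_key layout:

    #include <stddef.h>
    #include <stdio.h>

    struct key {
            unsigned long tun[2];   /* stands in for tun_key */
            unsigned long rest[2];  /* stands in for the rest of the key */
    } __attribute__((aligned(sizeof(long))));

    /* Compare [start, end) of two keys one long at a time, the way
     * the flow table compares sw_flow_keys from flow_key_start() on.
     */
    static int cmp_key(const struct key *a, const struct key *b,
                       size_t start, size_t end)
    {
            const unsigned long *la = (const unsigned long *)a;
            const unsigned long *lb = (const unsigned long *)b;
            size_t i;

            for (i = start / sizeof(long); i < end / sizeof(long); i++)
                    if (la[i] != lb[i])
                            return 1;
            return 0;
    }

    int main(void)
    {
            struct key a = { { 1, 2 }, { 3, 4 } };
            struct key b = { { 9, 9 }, { 3, 4 } };
            size_t start = offsetof(struct key, rest); /* skip the tunnel part */

            printf("full: %d, tail only: %d\n",
                   cmp_key(&a, &b, 0, sizeof(a)),      /* 1: tun words differ */
                   cmp_key(&a, &b, start, sizeof(a))); /* 0: tails are equal */
            return 0;
    }
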
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 5c030a4..907d6fd 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -262,8 +262,8 @@
 	 * updating this function.
 	 */
 	return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
-		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
-		+ nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
+		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
+		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
 		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
 		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
 		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
@@ -291,10 +291,10 @@
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
-		+ nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
 		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
-		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
+		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
 		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -323,6 +323,8 @@
 	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
 	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
 						.next = ovs_vxlan_ext_key_lens },
+	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
+	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
 };
 
 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
@@ -349,10 +351,10 @@
 	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
 				     .next = ovs_tunnel_key_lens, },
 	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
-	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u8) },
+	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u32) },
 	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
 	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
-	[OVS_KEY_ATTR_CT_LABEL]	 = { .len = sizeof(struct ovs_key_ct_label) },
+	[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
 };
 
 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@ -542,15 +544,15 @@
 	return 0;
 }
 
-static int ipv4_tun_from_nlattr(const struct nlattr *attr,
-				struct sw_flow_match *match, bool is_mask,
-				bool log)
+static int ip_tun_from_nlattr(const struct nlattr *attr,
+			      struct sw_flow_match *match, bool is_mask,
+			      bool log)
 {
-	struct nlattr *a;
-	int rem;
-	bool ttl = false;
+	bool ttl = false, ipv4 = false, ipv6 = false;
 	__be16 tun_flags = 0;
 	int opts_type = 0;
+	struct nlattr *a;
+	int rem;
 
 	nla_for_each_nested(a, attr, rem) {
 		int type = nla_type(a);
@@ -578,10 +580,22 @@
 		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
 			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
 					nla_get_in_addr(a), is_mask);
+			ipv4 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
 			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
 					nla_get_in_addr(a), is_mask);
+			ipv4 = true;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
+					nla_get_in6_addr(a), is_mask);
+			ipv6 = true;
+			break;
+		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+					nla_get_in6_addr(a), is_mask);
+			ipv6 = true;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TOS:
 			SW_FLOW_KEY_PUT(match, tun_key.tos,
@@ -636,28 +650,46 @@
 			opts_type = type;
 			break;
 		default:
-			OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
+			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
 				  type);
 			return -EINVAL;
 		}
 	}
 
 	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
+	if (is_mask)
+		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
+	else
+		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
+				false);
 
 	if (rem > 0) {
-		OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
+		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
 			  rem);
 		return -EINVAL;
 	}
 
+	if (ipv4 && ipv6) {
+		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
+		return -EINVAL;
+	}
+
 	if (!is_mask) {
-		if (!match->key->tun_key.u.ipv4.dst) {
+		if (!ipv4 && !ipv6) {
+			OVS_NLERR(log, "IP tunnel dst address not specified");
+			return -EINVAL;
+		}
+		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
 			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 			return -EINVAL;
 		}
+		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
+			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
+			return -EINVAL;
+		}
 
 		if (!ttl) {
-			OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
+			OVS_NLERR(log, "IP tunnel TTL not specified.");
 			return -EINVAL;
 		}
 	}
@@ -682,21 +714,36 @@
 	return 0;
 }
 
-static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-				const struct ip_tunnel_key *output,
-				const void *tun_opts, int swkey_tun_opts_len)
+static int __ip_tun_to_nlattr(struct sk_buff *skb,
+			      const struct ip_tunnel_key *output,
+			      const void *tun_opts, int swkey_tun_opts_len,
+			      unsigned short tun_proto)
 {
 	if (output->tun_flags & TUNNEL_KEY &&
 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
 		return -EMSGSIZE;
-	if (output->u.ipv4.src &&
-	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-			    output->u.ipv4.src))
-		return -EMSGSIZE;
-	if (output->u.ipv4.dst &&
-	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-			    output->u.ipv4.dst))
-		return -EMSGSIZE;
+	switch (tun_proto) {
+	case AF_INET:
+		if (output->u.ipv4.src &&
+		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
+				    output->u.ipv4.src))
+			return -EMSGSIZE;
+		if (output->u.ipv4.dst &&
+		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
+				    output->u.ipv4.dst))
+			return -EMSGSIZE;
+		break;
+	case AF_INET6:
+		if (!ipv6_addr_any(&output->u.ipv6.src) &&
+		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
+				     &output->u.ipv6.src))
+			return -EMSGSIZE;
+		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
+		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
+				     &output->u.ipv6.dst))
+			return -EMSGSIZE;
+		break;
+	}
 	if (output->tos &&
 	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
 		return -EMSGSIZE;
@@ -717,7 +764,7 @@
 	if ((output->tun_flags & TUNNEL_OAM) &&
 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
 		return -EMSGSIZE;
-	if (tun_opts) {
+	if (swkey_tun_opts_len) {
 		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
 		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
 			    swkey_tun_opts_len, tun_opts))
@@ -730,9 +777,10 @@
 	return 0;
 }
 
-static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-			      const struct ip_tunnel_key *output,
-			      const void *tun_opts, int swkey_tun_opts_len)
+static int ip_tun_to_nlattr(struct sk_buff *skb,
+			    const struct ip_tunnel_key *output,
+			    const void *tun_opts, int swkey_tun_opts_len,
+			    unsigned short tun_proto)
 {
 	struct nlattr *nla;
 	int err;
@@ -741,7 +789,8 @@
 	if (!nla)
 		return -EMSGSIZE;
 
-	err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
+	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
+				 tun_proto);
 	if (err)
 		return err;
 
@@ -749,13 +798,13 @@
 	return 0;
 }
 
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-				  const struct ip_tunnel_info *egress_tun_info,
-				  const void *egress_tun_opts)
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+			    struct ip_tunnel_info *tun_info)
 {
-	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
-				    egress_tun_opts,
-				    egress_tun_info->options_len);
+	return __ip_tun_to_nlattr(skb, &tun_info->key,
+				  ip_tunnel_info_opts(tun_info),
+				  tun_info->options_len,
+				  ip_tunnel_info_af(tun_info));
 }
 
 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
@@ -806,15 +855,21 @@
 		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
 	}
 	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
-		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
-					 is_mask, log) < 0)
+		if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+				       is_mask, log) < 0)
 			return -EINVAL;
 		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
 	}
 
 	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
 	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
-		u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+		u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
+
+		if (ct_state & ~CT_SUPPORTED_MASK) {
+			OVS_NLERR(log, "ct_state flags %08x unsupported",
+				  ct_state);
+			return -EINVAL;
+		}
 
 		SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
 		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
@@ -833,14 +888,14 @@
 		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
 		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
 	}
-	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
-	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
-		const struct ovs_key_ct_label *cl;
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
+		const struct ovs_key_ct_labels *cl;
 
-		cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
-		SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
+		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
 				   sizeof(*cl), is_mask);
-		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
 	}
 	return 0;
 }
@@ -1093,6 +1148,9 @@
 		} else {
 			memset(nla_data(nla), val, nla_len(nla));
 		}
+
+		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
 	}
 }
 
@@ -1194,7 +1252,7 @@
 			/* The userspace does not send tunnel attributes that
 			 * are 0, but we should not wildcard them nonetheless.
 			 */
-			if (match->key->tun_key.u.ipv4.dst)
+			if (match->key->tun_proto)
 				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
 							 0xff, true);
 
@@ -1367,14 +1425,14 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
 		goto nla_put_failure;
 
-	if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
+	if ((swkey->tun_proto || is_mask)) {
 		const void *opts = NULL;
 
 		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
 			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
 
-		if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
-				       swkey->tun_opts_len))
+		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
+				     swkey->tun_opts_len, swkey->tun_proto))
 			goto nla_put_failure;
 	}
 
@@ -1877,7 +1935,7 @@
 	int err = 0, start, opts_type;
 
 	ovs_match_init(&match, &key, NULL);
-	opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
+	opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
 	if (opts_type < 0)
 		return opts_type;
 
@@ -1913,6 +1971,8 @@
 
 	tun_info = &tun_dst->u.tun_info;
 	tun_info->mode = IP_TUNNEL_INFO_TX;
+	if (key.tun_proto == AF_INET6)
+		tun_info->mode |= IP_TUNNEL_INFO_IPV6;
 	tun_info->key = key.tun_key;
 
 	/* We need to store the options in the action itself since
@@ -1973,7 +2033,7 @@
 	case OVS_KEY_ATTR_PRIORITY:
 	case OVS_KEY_ATTR_SKB_MARK:
 	case OVS_KEY_ATTR_CT_MARK:
-	case OVS_KEY_ATTR_CT_LABEL:
+	case OVS_KEY_ATTR_CT_LABELS:
 	case OVS_KEY_ATTR_ETHERNET:
 		break;
 
@@ -2374,10 +2434,7 @@
 		if (!start)
 			return -EMSGSIZE;
 
-		err = ipv4_tun_to_nlattr(skb, &tun_info->key,
-					 tun_info->options_len ?
-					     ip_tunnel_info_opts(tun_info) : NULL,
-					 tun_info->options_len);
+		err = ovs_nla_put_tunnel_info(skb, tun_info);
 		if (err)
 			return err;
 		nla_nest_end(skb, start);
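The ovs_tun_key_attr_size() change above reserves 16 bytes per address slot so one budget covers either OVS_TUNNEL_KEY_ATTR_IPV4_* (4 bytes) or OVS_TUNNEL_KEY_ATTR_IPV6_* (sizeof(struct in6_addr) == 16): netlink messages are sized for the worst case up front because nla_put() returns EMSGSIZE rather than growing the buffer. A userspace sketch of the accounting, assuming the standard netlink layout (4-byte attribute header, 4-byte alignment):

    #include <stdio.h>

    #define NLA_ALIGNTO     4
    #define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN      4       /* struct nlattr: u16 nla_len + u16 nla_type */

    /* Worst-case bytes one attribute consumes: header plus padded payload. */
    static int nla_total_size(int payload)
    {
            return NLA_HDRLEN + NLA_ALIGN(payload);
    }

    int main(void)
    {
            int size = nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
                     + nla_total_size(16)   /* IPV4_SRC or IPV6_SRC */
                     + nla_total_size(16)   /* IPV4_DST or IPV6_DST */
                     + nla_total_size(1)    /* TOS */
                     + nla_total_size(1);   /* TTL */

            printf("worst-case tunnel key budget: %d bytes\n", size); /* 68 */
            return 0;
    }
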
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 6ca3f0b..47dd142 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -55,9 +55,9 @@
 int ovs_nla_get_match(struct net *, struct sw_flow_match *,
 		      const struct nlattr *key, const struct nlattr *mask,
 		      bool log);
-int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-				  const struct ip_tunnel_info *,
-				  const void *egress_tun_opts);
+
+int ovs_nla_put_tunnel_info(struct sk_buff *skb,
+			    struct ip_tunnel_info *tun_info);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index f2ea83b..d073fff 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -93,7 +93,8 @@
 
 	/* Initialize the default stat node. */
 	stats = kmem_cache_alloc_node(flow_stats_cache,
-				      GFP_KERNEL | __GFP_ZERO, 0);
+				      GFP_KERNEL | __GFP_ZERO,
+				      node_online(0) ? 0 : NUMA_NO_NODE);
 	if (!stats)
 		goto err;
 
@@ -427,7 +428,7 @@
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-	if (key->tun_key.u.ipv4.dst)
+	if (key->tun_proto)
 		return 0;
 	else
 		return rounddown(offsetof(struct sw_flow_key, phy),
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 2735e9c..efb736b 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -52,18 +52,6 @@
 	return 0;
 }
 
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				      struct dp_upcall_info *upcall)
-{
-	struct geneve_port *geneve_port = geneve_vport(vport);
-	struct net *net = ovs_dp_get_net(vport->dp);
-	__be16 dport = htons(geneve_port->port_no);
-	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
-	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-					  skb, IPPROTO_UDP, sport, dport);
-}
-
 static struct vport *geneve_tnl_create(const struct vport_parms *parms)
 {
 	struct net *net = ovs_dp_get_net(parms->dp);
@@ -128,9 +116,8 @@
 	.create		= geneve_create,
 	.destroy	= ovs_netdev_tunnel_destroy,
 	.get_options	= geneve_get_options,
-	.send		= ovs_netdev_send,
+	.send		= dev_queue_xmit,
 	.owner          = THIS_MODULE,
-	.get_egress_tun_info	= geneve_get_egress_tun_info,
 };
 
 static int __init ovs_geneve_tnl_init(void)
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 4d24481..c3257d7 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -84,18 +84,10 @@
 	return ovs_netdev_link(vport, parms->name);
 }
 
-static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				   struct dp_upcall_info *upcall)
-{
-	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
-					  skb, IPPROTO_GRE, 0, 0);
-}
-
 static struct vport_ops ovs_gre_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GRE,
 	.create		= gre_create,
-	.send		= ovs_netdev_send,
-	.get_egress_tun_info	= gre_get_egress_tun_info,
+	.send		= dev_queue_xmit,
 	.destroy	= ovs_netdev_tunnel_destroy,
 	.owner		= THIS_MODULE,
 };
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 388b8a6..ec76398 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -106,12 +106,45 @@
 	free_netdev(dev);
 }
 
+static struct rtnl_link_stats64 *
+internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+	stats->rx_errors  = dev->stats.rx_errors;
+	stats->tx_errors  = dev->stats.tx_errors;
+	stats->tx_dropped = dev->stats.tx_dropped;
+	stats->rx_dropped = dev->stats.rx_dropped;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_sw_netstats *percpu_stats;
+		struct pcpu_sw_netstats local_stats;
+		unsigned int start;
+
+		percpu_stats = per_cpu_ptr(dev->tstats, i);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
+			local_stats = *percpu_stats;
+		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
+
+		stats->rx_bytes         += local_stats.rx_bytes;
+		stats->rx_packets       += local_stats.rx_packets;
+		stats->tx_bytes         += local_stats.tx_bytes;
+		stats->tx_packets       += local_stats.tx_packets;
+	}
+
+	return stats;
+}
+
 static const struct net_device_ops internal_dev_netdev_ops = {
 	.ndo_open = internal_dev_open,
 	.ndo_stop = internal_dev_stop,
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = internal_dev_change_mtu,
+	.ndo_get_stats64 = internal_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -161,6 +194,11 @@
 		err = -ENOMEM;
 		goto error_free_vport;
 	}
+	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!vport->dev->tstats) {
+		err = -ENOMEM;
+		goto error_free_netdev;
+	}
 
 	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
 	internal_dev = internal_dev_priv(vport->dev);
@@ -173,7 +211,7 @@
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
 	if (err)
-		goto error_free_netdev;
+		goto error_unlock;
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
@@ -181,8 +219,10 @@
 
 	return vport;
 
-error_free_netdev:
+error_unlock:
 	rtnl_unlock();
+	free_percpu(vport->dev->tstats);
+error_free_netdev:
 	free_netdev(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
@@ -198,26 +238,25 @@
 
 	/* unregister_netdevice() waits for an RCU grace period. */
 	unregister_netdevice(vport->dev);
-
+	free_percpu(vport->dev->tstats);
 	rtnl_unlock();
 }
 
-static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
 {
-	struct net_device *netdev = vport->dev;
+	struct net_device *netdev = skb->dev;
 	struct pcpu_sw_netstats *stats;
 
 	if (unlikely(!(netdev->flags & IFF_UP))) {
 		kfree_skb(skb);
 		netdev->stats.rx_dropped++;
-		return;
+		return NETDEV_TX_OK;
 	}
 
 	skb_dst_drop(skb);
 	nf_reset(skb);
 	secpath_reset(skb);
 
-	skb->dev = netdev;
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = eth_type_trans(skb, netdev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
@@ -229,6 +268,7 @@
 	u64_stats_update_end(&stats->syncp);
 
 	netif_rx(skb);
+	return NETDEV_TX_OK;
 }
 
 static struct vport_ops ovs_internal_vport_ops = {
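internal_get_stats() above sums per-CPU counters under the u64_stats seqcount so 64-bit reads cannot be torn on 32-bit machines: the reader retries until it sees the same even sequence number before and after the copy. A simplified single-writer userspace model of that retry loop (not the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    struct pcpu_stats {
            unsigned int seq;       /* even: stable, odd: update in flight */
            uint64_t rx_bytes;
            uint64_t rx_packets;
    };

    static void writer_update(struct pcpu_stats *s, uint64_t bytes)
    {
            __atomic_fetch_add(&s->seq, 1, __ATOMIC_RELEASE);  /* begin, seq odd */
            s->rx_bytes += bytes;
            s->rx_packets++;
            __atomic_fetch_add(&s->seq, 1, __ATOMIC_RELEASE);  /* end, seq even */
    }

    static struct pcpu_stats reader_snapshot(const struct pcpu_stats *s)
    {
            struct pcpu_stats copy;
            unsigned int start;

            do {    /* mirrors u64_stats_fetch_begin/retry in the hunk above */
                    start = __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE);
                    copy = *s;
            } while ((start & 1) ||
                     start != __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE));

            return copy;
    }

    int main(void)
    {
            struct pcpu_stats s = { 0, 0, 0 };
            struct pcpu_stats snap;

            writer_update(&s, 1500);
            snap = reader_snapshot(&s);
            printf("rx %llu bytes / %llu packets\n",
                   (unsigned long long)snap.rx_bytes,
                   (unsigned long long)snap.rx_packets);
            return 0;
    }
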
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index f7e8dcc..b327368 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -190,37 +190,6 @@
 }
 EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
 
-static unsigned int packet_length(const struct sk_buff *skb)
-{
-	unsigned int length = skb->len - ETH_HLEN;
-
-	if (skb->protocol == htons(ETH_P_8021Q))
-		length -= VLAN_HLEN;
-
-	return length;
-}
-
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
-{
-	int mtu = vport->dev->mtu;
-
-	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
-		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-				     vport->dev->name,
-				     packet_length(skb), mtu);
-		vport->dev->stats.tx_errors++;
-		goto drop;
-	}
-
-	skb->dev = vport->dev;
-	dev_queue_xmit(skb);
-	return;
-
-drop:
-	kfree_skb(skb);
-}
-EXPORT_SYMBOL_GPL(ovs_netdev_send);
-
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
 {
@@ -235,7 +204,7 @@
 	.type		= OVS_VPORT_TYPE_NETDEV,
 	.create		= netdev_create,
 	.destroy	= netdev_destroy,
-	.send		= ovs_netdev_send,
+	.send		= dev_queue_xmit,
 };
 
 int __init ovs_netdev_init(void)
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index bf22fce..19e29c1 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -27,7 +27,6 @@
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
 struct vport *ovs_netdev_link(struct vport *vport, const char *name);
-void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
 
 int __init ovs_netdev_init(void);
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index fb3cdb8..1605691 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -146,32 +146,12 @@
 	return ovs_netdev_link(vport, parms->name);
 }
 
-static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				     struct dp_upcall_info *upcall)
-{
-	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
-	struct net *net = ovs_dp_get_net(vport->dp);
-	unsigned short family = ip_tunnel_info_af(upcall->egress_tun_info);
-	__be16 dst_port = vxlan_dev_dst_port(vxlan, family);
-	__be16 src_port;
-	int port_min;
-	int port_max;
-
-	inet_get_local_port_range(net, &port_min, &port_max);
-	src_port = udp_flow_src_port(net, skb, 0, 0, true);
-
-	return ovs_tunnel_get_egress_info(upcall, net,
-					  skb, IPPROTO_UDP,
-					  src_port, dst_port);
-}
-
 static struct vport_ops ovs_vxlan_netdev_vport_ops = {
 	.type			= OVS_VPORT_TYPE_VXLAN,
 	.create			= vxlan_create,
 	.destroy		= ovs_netdev_tunnel_destroy,
 	.get_options		= vxlan_get_options,
-	.send			= ovs_netdev_send,
-	.get_egress_tun_info	= vxlan_get_egress_tun_info,
+	.send			= dev_queue_xmit,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index dc81dc6..0ac0fd0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -280,35 +280,19 @@
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
-	struct net_device *dev = vport->dev;
-	int i;
+	const struct rtnl_link_stats64 *dev_stats;
+	struct rtnl_link_stats64 temp;
 
-	memset(stats, 0, sizeof(*stats));
-	stats->rx_errors  = dev->stats.rx_errors;
-	stats->tx_errors  = dev->stats.tx_errors;
-	stats->tx_dropped = dev->stats.tx_dropped;
-	stats->rx_dropped = dev->stats.rx_dropped;
+	dev_stats = dev_get_stats(vport->dev, &temp);
+	stats->rx_errors  = dev_stats->rx_errors;
+	stats->tx_errors  = dev_stats->tx_errors;
+	stats->tx_dropped = dev_stats->tx_dropped;
+	stats->rx_dropped = dev_stats->rx_dropped;
 
-	stats->rx_dropped += atomic_long_read(&dev->rx_dropped);
-	stats->tx_dropped += atomic_long_read(&dev->tx_dropped);
-
-	for_each_possible_cpu(i) {
-		const struct pcpu_sw_netstats *percpu_stats;
-		struct pcpu_sw_netstats local_stats;
-		unsigned int start;
-
-		percpu_stats = per_cpu_ptr(dev->tstats, i);
-
-		do {
-			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
-			local_stats = *percpu_stats;
-		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-
-		stats->rx_bytes		+= local_stats.rx_bytes;
-		stats->rx_packets	+= local_stats.rx_packets;
-		stats->tx_bytes		+= local_stats.tx_bytes;
-		stats->tx_packets	+= local_stats.tx_packets;
-	}
+	stats->rx_bytes	  = dev_stats->rx_bytes;
+	stats->rx_packets = dev_stats->rx_packets;
+	stats->tx_bytes	  = dev_stats->tx_bytes;
+	stats->tx_packets = dev_stats->tx_packets;
 }
 
 /**
@@ -460,6 +444,15 @@
 
 	OVS_CB(skb)->input_vport = vport;
 	OVS_CB(skb)->mru = 0;
+	if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) {
+		u32 mark;
+
+		mark = skb->mark;
+		skb_scrub_packet(skb, true);
+		skb->mark = mark;
+		tun_info = NULL;
+	}
+
 	/* Extract flow from 'skb' into 'key'. */
 	error = ovs_flow_key_extract(tun_info, skb, &key);
 	if (unlikely(error)) {
@@ -487,60 +480,32 @@
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-			       struct net *net,
-			       struct sk_buff *skb,
-			       u8 ipproto,
-			       __be16 tp_src,
-			       __be16 tp_dst)
+static unsigned int packet_length(const struct sk_buff *skb)
 {
-	struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
-	const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
-	const struct ip_tunnel_key *tun_key;
-	u32 skb_mark = skb->mark;
-	struct rtable *rt;
-	struct flowi4 fl;
+	unsigned int length = skb->len - ETH_HLEN;
 
-	if (unlikely(!tun_info))
-		return -EINVAL;
-	if (ip_tunnel_info_af(tun_info) != AF_INET)
-		return -EINVAL;
+	if (skb->protocol == htons(ETH_P_8021Q))
+		length -= VLAN_HLEN;
 
-	tun_key = &tun_info->key;
-
-	/* Route lookup to get srouce IP address.
-	 * The process may need to be changed if the corresponding process
-	 * in vports ops changed.
-	 */
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
-	if (IS_ERR(rt))
-		return PTR_ERR(rt);
-
-	ip_rt_put(rt);
-
-	/* Generate egress_tun_info based on tun_info,
-	 * saddr, tp_src and tp_dst
-	 */
-	ip_tunnel_key_init(&egress_tun_info->key,
-			   fl.saddr, tun_key->u.ipv4.dst,
-			   tun_key->tos,
-			   tun_key->ttl,
-			   tp_src, tp_dst,
-			   tun_key->tun_id,
-			   tun_key->tun_flags);
-	egress_tun_info->options_len = tun_info->options_len;
-	egress_tun_info->mode = tun_info->mode;
-	upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
-	return 0;
+	return length;
 }
-EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct dp_upcall_info *upcall)
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 {
-	/* get_egress_tun_info() is only implemented on tunnel ports. */
-	if (unlikely(!vport->ops->get_egress_tun_info))
-		return -EINVAL;
+	int mtu = vport->dev->mtu;
 
-	return vport->ops->get_egress_tun_info(vport, skb, upcall);
+	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
+		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
+				     vport->dev->name,
+				     packet_length(skb), mtu);
+		vport->dev->stats.tx_errors++;
+		goto drop;
+	}
+
+	skb->dev = vport->dev;
+	vport->ops->send(skb);
+	return;
+
+drop:
+	kfree_skb(skb);
 }
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index a413f3a..bdfd82a 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,7 +27,6 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
-#include <net/route.h>
 
 #include "datapath.h"
 
@@ -53,16 +52,6 @@
 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
-int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
-			       struct net *net,
-			       struct sk_buff *,
-			       u8 ipproto,
-			       __be16 tp_src,
-			       __be16 tp_dst);
-
-int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct dp_upcall_info *upcall);
-
 /**
  * struct vport_portids - array of netlink portids of a vport.
  *                        must be protected by rcu.
@@ -140,8 +129,6 @@
  * have any configuration.
  * @send: Send a packet on the device.
  * zero for dropped packets or negative for error.
- * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
- * a packet.
  */
 struct vport_ops {
 	enum ovs_vport_type type;
@@ -153,10 +140,7 @@
 	int (*set_options)(struct vport *, struct nlattr *);
 	int (*get_options)(const struct vport *, struct sk_buff *);
 
-	void (*send)(struct vport *, struct sk_buff *);
-	int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-				   struct dp_upcall_info *upcall);
-
+	netdev_tx_t (*send) (struct sk_buff *skb);
 	struct module *owner;
 	struct list_head list;
 };
@@ -234,9 +218,6 @@
 	return rt;
 }
 
-static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
-	vport->ops->send(vport, skb);
-}
+void ovs_vport_send(struct vport *vport, struct sk_buff *skb);
 
 #endif /* vport.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index aa4b15c..691660b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1423,7 +1423,7 @@
 	rcu_read_lock();
 	prog = rcu_dereference(f->bpf_prog);
 	if (prog)
-		ret = BPF_PROG_RUN(prog, skb) % num;
+		ret = bpf_prog_run_clear_cb(prog, skb) % num;
 	rcu_read_unlock();
 
 	return ret;
@@ -1439,17 +1439,17 @@
 {
 	struct packet_fanout *f = pt->af_packet_priv;
 	unsigned int num = READ_ONCE(f->num_members);
+	struct net *net = read_pnet(&f->net);
 	struct packet_sock *po;
 	unsigned int idx;
 
-	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
-	    !num) {
+	if (!net_eq(dev_net(dev), net) || !num) {
 		kfree_skb(skb);
 		return 0;
 	}
 
 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
-		skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
+		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
 		if (!skb)
 			return 0;
 	}
@@ -1519,10 +1519,10 @@
 
 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
 {
-	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
-		return true;
+	if (sk->sk_family != PF_PACKET)
+		return false;
 
-	return false;
+	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
 }
 
 static void fanout_init_data(struct packet_fanout *f)
@@ -1567,7 +1567,7 @@
 	if (copy_from_user(&fprog, data, len))
 		return -EFAULT;
 
-	ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
 	if (ret)
 		return ret;
 
@@ -1939,16 +1939,16 @@
 	return err;
 }
 
-static unsigned int run_filter(const struct sk_buff *skb,
-				      const struct sock *sk,
-				      unsigned int res)
+static unsigned int run_filter(struct sk_buff *skb,
+			       const struct sock *sk,
+			       unsigned int res)
 {
 	struct sk_filter *filter;
 
 	rcu_read_lock();
 	filter = rcu_dereference(sk->sk_filter);
 	if (filter != NULL)
-		res = SK_RUN_FILTER(filter, skb);
+		res = bpf_prog_run_clear_cb(filter->prog, skb);
 	rcu_read_unlock();
 
 	return res;
@@ -2630,6 +2630,7 @@
 	__be16 proto;
 	unsigned char *addr;
 	int err, reserve = 0;
+	struct sockcm_cookie sockc;
 	struct virtio_net_hdr vnet_hdr = { 0 };
 	int offset = 0;
 	int vnet_hdr_len;
@@ -2665,6 +2666,13 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_unlock;
 
+	sockc.mark = sk->sk_mark;
+	if (msg->msg_controllen) {
+		err = sock_cmsg_send(sk, msg, &sockc);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
 	if (sock->type == SOCK_RAW)
 		reserve = dev->hard_header_len;
 	if (po->has_vnet_hdr) {
@@ -2774,7 +2782,7 @@
 	skb->protocol = proto;
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
-	skb->mark = sk->sk_mark;
+	skb->mark = sockc.mark;
 
 	packet_pick_tx_queue(dev, skb);
 
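With the sockc plumbing above, af_packet senders can override the socket-wide sk_mark per packet by attaching a SOL_SOCKET/SO_MARK control message to sendmsg(). A userspace sketch (needs CAP_NET_RAW for the socket and CAP_NET_ADMIN for the mark; the "lo" interface, ethertype and frame contents are illustrative):

    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <net/if.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, 0);
            struct sockaddr_ll addr = {
                    .sll_family = AF_PACKET,
                    .sll_protocol = htons(0x88b5), /* local experimental ethertype */
                    .sll_ifindex = (int)if_nametoindex("lo"),
                    .sll_halen = ETH_ALEN,
            };
            unsigned char frame[ETH_ZLEN] = {
                    0xff, 0xff, 0xff, 0xff, 0xff, 0xff,  /* dst MAC: broadcast */
                    0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  /* src MAC */
                    0x88, 0xb5,                          /* ethertype */
            };
            struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
            union {                 /* keeps the cmsg buffer aligned */
                    char buf[CMSG_SPACE(sizeof(uint32_t))];
                    struct cmsghdr align;
            } u;
            struct msghdr msg = {
                    .msg_name = &addr,
                    .msg_namelen = sizeof(addr),
                    .msg_iov = &iov,
                    .msg_iovlen = 1,
                    .msg_control = u.buf,
                    .msg_controllen = sizeof(u.buf),
            };
            struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
            uint32_t mark = 42;     /* becomes skb->mark for this packet only */

            cm->cmsg_level = SOL_SOCKET;
            cm->cmsg_type = SO_MARK;
            cm->cmsg_len = CMSG_LEN(sizeof(mark));
            memcpy(CMSG_DATA(cm), &mark, sizeof(mark));

            if (fd < 0 || sendmsg(fd, &msg, 0) < 0)
                    perror("packet send");
            close(fd);
            return 0;
    }
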
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index a2f28a6..384ea1e 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -72,13 +72,7 @@
 	rds_clear_recv_queue(rs);
 	rds_cong_remove_socket(rs);
 
-	/*
-	 * the binding lookup hash uses rcu, we need to
-	 * make sure we synchronize_rcu before we free our
-	 * entry
-	 */
 	rds_remove_bound(rs);
-	synchronize_rcu();
 
 	rds_send_drop_to(rs, NULL);
 	rds_rdma_drop_keys(rs);
@@ -588,6 +582,8 @@
 {
 	int ret;
 
+	rds_bind_lock_init();
+
 	ret = rds_conn_init();
 	if (ret)
 		goto out;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index dd666fb..6192566 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -38,48 +38,50 @@
 #include <linux/ratelimit.h>
 #include "rds.h"
 
-#define BIND_HASH_SIZE 1024
-static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
-static DEFINE_SPINLOCK(rds_bind_lock);
+struct bind_bucket {
+	rwlock_t                lock;
+	struct hlist_head	head;
+};
 
-static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
+#define BIND_HASH_SIZE 1024
+static struct bind_bucket bind_hash_table[BIND_HASH_SIZE];
+
+static struct bind_bucket *hash_to_bucket(__be32 addr, __be16 port)
 {
 	return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
 				  (BIND_HASH_SIZE - 1));
 }
 
-static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
+/* must hold either read or write lock (write lock for insert != NULL) */
+static struct rds_sock *rds_bind_lookup(struct bind_bucket *bucket,
+					__be32 addr, __be16 port,
 					struct rds_sock *insert)
 {
 	struct rds_sock *rs;
-	struct hlist_head *head = hash_to_bucket(addr, port);
+	struct hlist_head *head = &bucket->head;
 	u64 cmp;
 	u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
 
-	rcu_read_lock();
-	hlist_for_each_entry_rcu(rs, head, rs_bound_node) {
+	hlist_for_each_entry(rs, head, rs_bound_node) {
 		cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
 		      be16_to_cpu(rs->rs_bound_port);
 
 		if (cmp == needle) {
-			rcu_read_unlock();
+			rds_sock_addref(rs);
 			return rs;
 		}
 	}
-	rcu_read_unlock();
 
 	if (insert) {
 		/*
 		 * make sure our addr and port are set before
-		 * we are added to the list, other people
-		 * in rcu will find us as soon as the
-		 * hlist_add_head_rcu is done
+		 * we are added to the list.
 		 */
 		insert->rs_bound_addr = addr;
 		insert->rs_bound_port = port;
 		rds_sock_addref(insert);
 
-		hlist_add_head_rcu(&insert->rs_bound_node, head);
+		hlist_add_head(&insert->rs_bound_node, head);
 	}
 	return NULL;
 }
@@ -93,16 +95,21 @@
 struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
 {
 	struct rds_sock *rs;
+	unsigned long flags;
+	struct bind_bucket *bucket = hash_to_bucket(addr, port);
 
-	rs = rds_bind_lookup(addr, port, NULL);
+	read_lock_irqsave(&bucket->lock, flags);
+	rs = rds_bind_lookup(bucket, addr, port, NULL);
+	read_unlock_irqrestore(&bucket->lock, flags);
 
-	if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
-		rds_sock_addref(rs);
-	else
+	if (rs && sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) {
+		rds_sock_put(rs);
 		rs = NULL;
+	}
 
 	rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
 		ntohs(port));
+
 	return rs;
 }
 
@@ -112,6 +119,7 @@
 	unsigned long flags;
 	int ret = -EADDRINUSE;
 	u16 rover, last;
+	struct bind_bucket *bucket;
 
 	if (*port != 0) {
 		rover = be16_to_cpu(*port);
@@ -121,42 +129,48 @@
 		last = rover - 1;
 	}
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
-
 	do {
+		struct rds_sock *rrs;
 		if (rover == 0)
 			rover++;
-		if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
+
+		bucket = hash_to_bucket(addr, cpu_to_be16(rover));
+		write_lock_irqsave(&bucket->lock, flags);
+		rrs = rds_bind_lookup(bucket, addr, cpu_to_be16(rover), rs);
+		write_unlock_irqrestore(&bucket->lock, flags);
+		if (!rrs) {
 			*port = rs->rs_bound_port;
 			ret = 0;
 			rdsdebug("rs %p binding to %pI4:%d\n",
 			  rs, &addr, (int)ntohs(*port));
 			break;
+		} else {
+			rds_sock_put(rrs);
 		}
 	} while (rover++ != last);
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
-
 	return ret;
 }
 
 void rds_remove_bound(struct rds_sock *rs)
 {
 	unsigned long flags;
+	struct bind_bucket *bucket =
+		hash_to_bucket(rs->rs_bound_addr, rs->rs_bound_port);
 
-	spin_lock_irqsave(&rds_bind_lock, flags);
+	write_lock_irqsave(&bucket->lock, flags);
 
 	if (rs->rs_bound_addr) {
 		rdsdebug("rs %p unbinding from %pI4:%d\n",
 		  rs, &rs->rs_bound_addr,
 		  ntohs(rs->rs_bound_port));
 
-		hlist_del_init_rcu(&rs->rs_bound_node);
+		hlist_del_init(&rs->rs_bound_node);
 		rds_sock_put(rs);
 		rs->rs_bound_addr = 0;
 	}
 
-	spin_unlock_irqrestore(&rds_bind_lock, flags);
+	write_unlock_irqrestore(&bucket->lock, flags);
 }
 
 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
@@ -182,7 +196,14 @@
 		goto out;
 
 	if (rs->rs_transport) { /* previously bound */
-		ret = 0;
+		trans = rs->rs_transport;
+		if (trans->laddr_check(sock_net(sock->sk),
+				       sin->sin_addr.s_addr) != 0) {
+			ret = -ENOPROTOOPT;
+			rds_remove_bound(rs);
+		} else {
+			ret = 0;
+		}
 		goto out;
 	}
 	trans = rds_trans_get_preferred(sock_net(sock->sk),
@@ -200,9 +221,13 @@
 
 out:
 	release_sock(sk);
-
-	/* we might have called rds_remove_bound on error */
-	if (ret)
-		synchronize_rcu();
 	return ret;
 }
+
+void rds_bind_lock_init(void)
+{
+	int i;
+
+	for (i = 0; i < BIND_HASH_SIZE; i++)
+		rwlock_init(&bind_hash_table[i].lock);
+}
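The bind.c rework above trades the global rds_bind_lock (plus RCU list walks) for one rwlock per hash bucket: lookups take the bucket lock for read, insert and remove take it for write, and binds hashing to different buckets never contend. A userspace pthreads sketch of the structure (toy hash in place of jhash_2words(); the real lookup also takes a socket reference before dropping the lock):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HASH_SIZE 1024

    struct node {
            struct node *next;
            uint32_t addr;
            uint16_t port;
    };

    struct bucket {
            pthread_rwlock_t lock;  /* one lock per bucket, not per table */
            struct node *head;
    };

    static struct bucket table[HASH_SIZE];

    static struct bucket *hash_to_bucket(uint32_t addr, uint16_t port)
    {
            return &table[(addr * 2654435761u ^ port) & (HASH_SIZE - 1)];
    }

    static struct node *lookup(uint32_t addr, uint16_t port)
    {
            struct bucket *b = hash_to_bucket(addr, port);
            struct node *n;

            pthread_rwlock_rdlock(&b->lock);        /* readers share the bucket */
            for (n = b->head; n; n = n->next)
                    if (n->addr == addr && n->port == port)
                            break;
            pthread_rwlock_unlock(&b->lock);        /* real code refs n first */
            return n;
    }

    static void insert(struct node *n)
    {
            struct bucket *b = hash_to_bucket(n->addr, n->port);

            pthread_rwlock_wrlock(&b->lock);        /* writers are exclusive */
            n->next = b->head;
            b->head = n;
            pthread_rwlock_unlock(&b->lock);
    }

    int main(void)
    {
            struct node n = { .next = NULL, .addr = 0x7f000001, .port = 1234 };
            int i;

            for (i = 0; i < HASH_SIZE; i++)         /* like rds_bind_lock_init() */
                    pthread_rwlock_init(&table[i].lock, NULL);

            insert(&n);
            printf("found: %s\n", lookup(0x7f000001, 1234) ? "yes" : "no");
            return 0;
    }
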
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 49adeef..d456403 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -128,10 +128,7 @@
 	struct rds_transport *loop_trans;
 	unsigned long flags;
 	int ret;
-	struct rds_transport *otrans = trans;
 
-	if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
-		goto new_conn;
 	rcu_read_lock();
 	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
 	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
@@ -147,7 +144,6 @@
 	if (conn)
 		goto out;
 
-new_conn:
 	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
 	if (!conn) {
 		conn = ERR_PTR(-ENOMEM);
@@ -207,6 +203,7 @@
 
 	atomic_set(&conn->c_state, RDS_CONN_DOWN);
 	conn->c_send_gen = 0;
+	conn->c_outgoing = (is_outgoing ? 1 : 0);
 	conn->c_reconnect_jiffies = 0;
 	INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
 	INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
@@ -243,22 +240,13 @@
 		/* Creating normal conn */
 		struct rds_connection *found;
 
-		if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
-			found = NULL;
-		else
-			found = rds_conn_lookup(net, head, laddr, faddr, trans);
+		found = rds_conn_lookup(net, head, laddr, faddr, trans);
 		if (found) {
 			trans->conn_free(conn->c_transport_data);
 			kmem_cache_free(rds_conn_slab, conn);
 			conn = found;
 		} else {
-			if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
-			    (otrans->t_type != RDS_TRANS_TCP)) {
-				/* Only the active side should be added to
-				 * reconnect list for TCP.
-				 */
-				hlist_add_head_rcu(&conn->c_hash_node, head);
-			}
+			hlist_add_head_rcu(&conn->c_hash_node, head);
 			rds_cong_add_conn(conn);
 			rds_conn_count++;
 		}
@@ -337,7 +325,9 @@
 	rcu_read_lock();
 	if (!hlist_unhashed(&conn->c_hash_node)) {
 		rcu_read_unlock();
-		rds_queue_reconnect(conn);
+		if (conn->c_trans->t_type != RDS_TRANS_TCP ||
+		    conn->c_outgoing == 1)
+			rds_queue_reconnect(conn);
 	} else {
 		rcu_read_unlock();
 	}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2d3f2ab..a833ab7 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -43,14 +43,14 @@
 #include "rds.h"
 #include "ib.h"
 
-static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
-unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
+unsigned int rds_ib_fmr_1m_pool_size = RDS_FMR_1M_POOL_SIZE;
+unsigned int rds_ib_fmr_8k_pool_size = RDS_FMR_8K_POOL_SIZE;
 unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
 
-module_param(fmr_pool_size, int, 0444);
-MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
-module_param(fmr_message_size, int, 0444);
-MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
+module_param(rds_ib_fmr_1m_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_fmr_1m_pool_size, " Max number of 1M fmr per HCA");
+module_param(rds_ib_fmr_8k_pool_size, int, 0444);
+MODULE_PARM_DESC(rds_ib_fmr_8k_pool_size, " Max number of 8K fmr per HCA");
 module_param(rds_ib_retry_count, int, 0444);
 MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
 
@@ -97,8 +97,10 @@
 	struct rds_ib_device *rds_ibdev = container_of(work,
 					struct rds_ib_device, free_work);
 
-	if (rds_ibdev->mr_pool)
-		rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
+	if (rds_ibdev->mr_8k_pool)
+		rds_ib_destroy_mr_pool(rds_ibdev->mr_8k_pool);
+	if (rds_ibdev->mr_1m_pool)
+		rds_ib_destroy_mr_pool(rds_ibdev->mr_1m_pool);
 	if (rds_ibdev->pd)
 		ib_dealloc_pd(rds_ibdev->pd);
 
@@ -148,9 +150,13 @@
 	rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
 
 	rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
-	rds_ibdev->max_fmrs = dev_attr->max_fmr ?
-			min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
-			fmr_pool_size;
+	rds_ibdev->max_1m_fmrs = dev_attr->max_mr ?
+		min_t(unsigned int, (dev_attr->max_mr / 2),
+		      rds_ib_fmr_1m_pool_size) : rds_ib_fmr_1m_pool_size;
+
+	rds_ibdev->max_8k_fmrs = dev_attr->max_mr ?
+		min_t(unsigned int, ((dev_attr->max_mr / 2) * RDS_MR_8K_SCALE),
+		      rds_ib_fmr_8k_pool_size) : rds_ib_fmr_8k_pool_size;
 
 	rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
 	rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
@@ -162,12 +168,25 @@
 		goto put_dev;
 	}
 
-	rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
-	if (IS_ERR(rds_ibdev->mr_pool)) {
-		rds_ibdev->mr_pool = NULL;
+	rds_ibdev->mr_1m_pool =
+		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_1M_POOL);
+	if (IS_ERR(rds_ibdev->mr_1m_pool)) {
+		rds_ibdev->mr_1m_pool = NULL;
 		goto put_dev;
 	}
 
+	rds_ibdev->mr_8k_pool =
+		rds_ib_create_mr_pool(rds_ibdev, RDS_IB_MR_8K_POOL);
+	if (IS_ERR(rds_ibdev->mr_8k_pool)) {
+		rds_ibdev->mr_8k_pool = NULL;
+		goto put_dev;
+	}
+
+	rdsdebug("RDS/IB: max_mr = %d, max_wrs = %d, max_sge = %d, fmr_max_remaps = %d, max_1m_fmrs = %d, max_8k_fmrs = %d\n",
+		 dev_attr->max_fmr, rds_ibdev->max_wrs, rds_ibdev->max_sge,
+		 rds_ibdev->fmr_max_remaps, rds_ibdev->max_1m_fmrs,
+		 rds_ibdev->max_8k_fmrs);
+
 	INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
 	INIT_LIST_HEAD(&rds_ibdev->conn_list);
 
diff --git a/net/rds/ib.h b/net/rds/ib.h
index aae60fd..f17d095 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -9,8 +9,11 @@
 #include "rds.h"
 #include "rdma_transport.h"
 
-#define RDS_FMR_SIZE			256
-#define RDS_FMR_POOL_SIZE		8192
+#define RDS_FMR_1M_POOL_SIZE		(8192 / 2)
+#define RDS_FMR_1M_MSG_SIZE		256
+#define RDS_FMR_8K_MSG_SIZE		2
+#define RDS_MR_8K_SCALE			(256 / (RDS_FMR_8K_MSG_SIZE + 1))
+#define RDS_FMR_8K_POOL_SIZE		(RDS_MR_8K_SCALE * (8192 / 2))
 
 #define RDS_IB_MAX_SGE			8
 #define RDS_IB_RECV_SGE 		2
@@ -24,6 +27,9 @@
 
 #define RDS_IB_RECYCLE_BATCH_COUNT	32
 
+#define RDS_IB_WC_MAX			32
+#define RDS_IB_SEND_OP			BIT_ULL(63)
+
 extern struct rw_semaphore rds_ib_devices_lock;
 extern struct list_head rds_ib_devices;
 
@@ -89,6 +95,20 @@
 	atomic_t	w_free_ctr;
 };
 
+/* Rings are posted with all the allocations they'll need to queue the
+ * incoming message to the receiving socket so this can't fail.
+ * All fragments start with a header, so we can make sure we're not receiving
+ * garbage, and we can tell a small 8 byte fragment from an ACK frame.
+ */
+struct rds_ib_ack_state {
+	u64		ack_next;
+	u64		ack_recv;
+	unsigned int	ack_required:1;
+	unsigned int	ack_next_valid:1;
+	unsigned int	ack_recv_valid:1;
+};
+
+
 struct rds_ib_device;
 
 struct rds_ib_connection {
@@ -102,6 +122,12 @@
 	struct ib_pd		*i_pd;
 	struct ib_cq		*i_send_cq;
 	struct ib_cq		*i_recv_cq;
+	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
+	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];
+
+	/* interrupt handling */
+	struct tasklet_struct	i_send_tasklet;
+	struct tasklet_struct	i_recv_tasklet;
 
 	/* tx */
 	struct rds_ib_work_ring	i_send_ring;
@@ -112,7 +138,6 @@
 	atomic_t		i_signaled_sends;
 
 	/* rx */
-	struct tasklet_struct	i_recv_tasklet;
 	struct mutex		i_recv_mutex;
 	struct rds_ib_work_ring	i_recv_ring;
 	struct rds_ib_incoming	*i_ibinc;
@@ -164,6 +189,12 @@
 struct rds_ib_ipaddr {
 	struct list_head	list;
 	__be32			ipaddr;
+	struct rcu_head		rcu;
+};
+
+enum {
+	RDS_IB_MR_8K_POOL,
+	RDS_IB_MR_1M_POOL,
 };
 
 struct rds_ib_device {
@@ -172,9 +203,12 @@
 	struct list_head	conn_list;
 	struct ib_device	*dev;
 	struct ib_pd		*pd;
-	struct rds_ib_mr_pool	*mr_pool;
-	unsigned int		fmr_max_remaps;
 	unsigned int		max_fmrs;
+	struct rds_ib_mr_pool	*mr_1m_pool;
+	struct rds_ib_mr_pool   *mr_8k_pool;
+	unsigned int		fmr_max_remaps;
+	unsigned int		max_8k_fmrs;
+	unsigned int		max_1m_fmrs;
 	int			max_sge;
 	unsigned int		max_wrs;
 	unsigned int		max_initiator_depth;
@@ -197,14 +231,14 @@
 struct rds_ib_statistics {
 	uint64_t	s_ib_connect_raced;
 	uint64_t	s_ib_listen_closed_stale;
-	uint64_t	s_ib_tx_cq_call;
+	uint64_t	s_ib_evt_handler_call;
+	uint64_t	s_ib_tasklet_call;
 	uint64_t	s_ib_tx_cq_event;
 	uint64_t	s_ib_tx_ring_full;
 	uint64_t	s_ib_tx_throttle;
 	uint64_t	s_ib_tx_sg_mapping_failure;
 	uint64_t	s_ib_tx_stalled;
 	uint64_t	s_ib_tx_credit_updates;
-	uint64_t	s_ib_rx_cq_call;
 	uint64_t	s_ib_rx_cq_event;
 	uint64_t	s_ib_rx_ring_empty;
 	uint64_t	s_ib_rx_refill_from_cq;
@@ -216,12 +250,18 @@
 	uint64_t	s_ib_ack_send_delayed;
 	uint64_t	s_ib_ack_send_piggybacked;
 	uint64_t	s_ib_ack_received;
-	uint64_t	s_ib_rdma_mr_alloc;
-	uint64_t	s_ib_rdma_mr_free;
-	uint64_t	s_ib_rdma_mr_used;
-	uint64_t	s_ib_rdma_mr_pool_flush;
-	uint64_t	s_ib_rdma_mr_pool_wait;
-	uint64_t	s_ib_rdma_mr_pool_depleted;
+	uint64_t	s_ib_rdma_mr_8k_alloc;
+	uint64_t	s_ib_rdma_mr_8k_free;
+	uint64_t	s_ib_rdma_mr_8k_used;
+	uint64_t	s_ib_rdma_mr_8k_pool_flush;
+	uint64_t	s_ib_rdma_mr_8k_pool_wait;
+	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
+	uint64_t	s_ib_rdma_mr_1m_alloc;
+	uint64_t	s_ib_rdma_mr_1m_free;
+	uint64_t	s_ib_rdma_mr_1m_used;
+	uint64_t	s_ib_rdma_mr_1m_pool_flush;
+	uint64_t	s_ib_rdma_mr_1m_pool_wait;
+	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
 	uint64_t	s_ib_atomic_cswp;
 	uint64_t	s_ib_atomic_fadd;
 };
@@ -273,7 +313,8 @@
 void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
 extern struct ib_client rds_ib_client;
 
-extern unsigned int fmr_message_size;
+extern unsigned int rds_ib_fmr_1m_pool_size;
+extern unsigned int rds_ib_fmr_8k_pool_size;
 extern unsigned int rds_ib_retry_count;
 
 extern spinlock_t ib_nodev_conns_lock;
@@ -303,7 +344,8 @@
 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
 void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
 void rds_ib_destroy_nodev_conns(void);
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
+					     int npages);
 void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -323,7 +365,8 @@
 void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
-void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
+			     struct rds_ib_ack_state *state);
 void rds_ib_recv_tasklet_fn(unsigned long data);
 void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
@@ -331,6 +374,7 @@
 void rds_ib_attempt_ack(struct rds_ib_connection *ic);
 void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);
 
 /* ib_ring.c */
 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
@@ -348,7 +392,7 @@
 void rds_ib_xmit_complete(struct rds_connection *conn);
 int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 		unsigned int hdr_off, unsigned int sg, unsigned int off);
-void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
+void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
 void rds_ib_send_init_ring(struct rds_ib_connection *ic);
 void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
 int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 9043f5c..2b2370e 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -216,6 +216,96 @@
 		 event->event, ib_event_msg(event->event), data);
 }
 
+/* Plucking the oldest entry from the ring can be done concurrently with
+ * the thread refilling the ring.  Each ring operation is protected by
+ * spinlocks and the transient state of refilling doesn't change the
+ * recording of which entry is oldest.
+ *
+ * This relies on IB only calling one cq comp_handler for each cq so that
+ * there will only be one caller of rds_recv_incoming() per RDS connection.
+ */
+static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_ib_connection *ic = conn->c_transport_data;
+
+	rdsdebug("conn %p cq %p\n", conn, cq);
+
+	rds_ib_stats_inc(s_ib_evt_handler_call);
+
+	tasklet_schedule(&ic->i_recv_tasklet);
+}
+
+static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq,
+		    struct ib_wc *wcs,
+		    struct rds_ib_ack_state *ack_state)
+{
+	int nr;
+	int i;
+	struct ib_wc *wc;
+
+	while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
+		for (i = 0; i < nr; i++) {
+			wc = wcs + i;
+			rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
+				 (unsigned long long)wc->wr_id, wc->status,
+				 wc->byte_len, be32_to_cpu(wc->ex.imm_data));
+
+			if (wc->wr_id & RDS_IB_SEND_OP)
+				rds_ib_send_cqe_handler(ic, wc);
+			else
+				rds_ib_recv_cqe_handler(ic, wc, ack_state);
+		}
+	}
+}
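
/* Editorial sketch, not part of the patch: poll_cq() above demultiplexes
 * send and recv completions from one wr_id by tagging send work requests
 * with bit 63 (RDS_IB_SEND_OP). A minimal, compilable userspace
 * illustration of the tag/strip pattern; all names below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define SEND_OP	(1ULL << 63)	/* mirrors RDS_IB_SEND_OP */

static void handle_completion(uint64_t wr_id)
{
	if (wr_id & SEND_OP)
		printf("send completion, ring index %llu\n",
		       (unsigned long long)(wr_id & ~SEND_OP));
	else
		printf("recv completion, ring index %llu\n",
		       (unsigned long long)wr_id);
}

int main(void)
{
	handle_completion(5 | SEND_OP);	/* a tagged send slot */
	handle_completion(7);		/* an untagged recv slot */
	return 0;
}
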
+
+static void rds_ib_tasklet_fn_send(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_ack_state state;
+
+	rds_ib_stats_inc(s_ib_tasklet_call);
+
+	memset(&state, 0, sizeof(state));
+	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+	ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
+	poll_cq(ic, ic->i_send_cq, ic->i_send_wc, &state);
+
+	if (rds_conn_up(conn) &&
+	    (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+	    test_bit(0, &conn->c_map_queued)))
+		rds_send_xmit(ic->conn);
+}
+
+static void rds_ib_tasklet_fn_recv(unsigned long data)
+{
+	struct rds_ib_connection *ic = (struct rds_ib_connection *)data;
+	struct rds_connection *conn = ic->conn;
+	struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
+	struct rds_ib_ack_state state;
+
+	if (!rds_ibdev)
+		rds_conn_drop(conn);
+
+	rds_ib_stats_inc(s_ib_tasklet_call);
+
+	memset(&state, 0, sizeof(state));
+	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+	poll_cq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
+
+	if (state.ack_next_valid)
+		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
+	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
+		rds_send_drop_acked(conn, state.ack_recv, NULL);
+		ic->i_ack_recv = state.ack_recv;
+	}
+
+	if (rds_conn_up(conn))
+		rds_ib_attempt_ack(ic);
+}
+
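
/* Editorial sketch, not part of the patch: both tasklets above use the
 * poll -> re-arm -> poll-again idiom, so a completion that arrives after
 * the first drain but before ib_req_notify_cq() re-arms the CQ is still
 * picked up. Generic sketch with stand-in callbacks:
 */
struct cq;	/* opaque stand-in for struct ib_cq */

static void service_cq(struct cq *cq,
		       int (*poll)(struct cq *),	/* ib_poll_cq() stand-in */
		       void (*arm)(struct cq *))	/* ib_req_notify_cq() stand-in */
{
	while (poll(cq) > 0)
		;	/* consume everything already queued */
	arm(cq);	/* events from here on raise the interrupt */
	while (poll(cq) > 0)
		;	/* catch completions that raced the re-arm */
}
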
 static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
 {
 	struct rds_connection *conn = data;
@@ -238,6 +328,18 @@
 	}
 }
 
+static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
+{
+	struct rds_connection *conn = context;
+	struct rds_ib_connection *ic = conn->c_transport_data;
+
+	rdsdebug("conn %p cq %p\n", conn, cq);
+
+	rds_ib_stats_inc(s_ib_evt_handler_call);
+
+	tasklet_schedule(&ic->i_send_tasklet);
+}
+
 /*
  * This needs to be very careful to not leave IS_ERR pointers around for
  * cleanup to trip over.
@@ -271,7 +373,8 @@
 	ic->i_pd = rds_ibdev->pd;
 
 	cq_attr.cqe = ic->i_send_ring.w_nr + 1;
-	ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
+
+	ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_send_cq)) {
@@ -282,7 +385,7 @@
 	}
 
 	cq_attr.cqe = ic->i_recv_ring.w_nr;
-	ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
+	ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
 				     rds_ib_cq_event_handler, conn,
 				     &cq_attr);
 	if (IS_ERR(ic->i_recv_cq)) {
@@ -637,6 +740,7 @@
 		wait_event(rds_ib_ring_empty_wait,
 			   rds_ib_ring_empty(&ic->i_recv_ring) &&
 			   (atomic_read(&ic->i_signaled_sends) == 0));
+		tasklet_kill(&ic->i_send_tasklet);
 		tasklet_kill(&ic->i_recv_tasklet);
 
 		/* first destroy the ib state that generates callbacks */
@@ -743,8 +847,10 @@
 	}
 
 	INIT_LIST_HEAD(&ic->ib_node);
-	tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
-		     (unsigned long) ic);
+	tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send,
+		     (unsigned long)ic);
+	tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv,
+		     (unsigned long)ic);
 	mutex_init(&ic->i_recv_mutex);
 #ifndef KERNEL_HAS_ATOMIC64
 	spin_lock_init(&ic->i_ack_lock);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 251d1ce..a234074 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -65,6 +65,7 @@
  * Our own little FMR pool
  */
 struct rds_ib_mr_pool {
+	unsigned int            pool_type;
 	struct mutex		flush_lock;		/* serialize fmr invalidate */
 	struct delayed_work	flush_worker;		/* flush worker */
 
@@ -83,7 +84,7 @@
 	struct ib_fmr_attr	fmr_attr;
 };
 
-struct workqueue_struct *rds_ib_fmr_wq;
+static struct workqueue_struct *rds_ib_fmr_wq;
 
 int rds_ib_fmr_init(void)
 {
@@ -159,10 +160,8 @@
 	}
 	spin_unlock_irq(&rds_ibdev->spinlock);
 
-	if (to_free) {
-		synchronize_rcu();
-		kfree(to_free);
-	}
+	if (to_free)
+		kfree_rcu(to_free, rcu);
 }
 
 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
@@ -236,7 +235,8 @@
 		rds_conn_destroy(ic->conn);
 }
 
-struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
+struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
+					     int pool_type)
 {
 	struct rds_ib_mr_pool *pool;
 
@@ -244,6 +244,7 @@
 	if (!pool)
 		return ERR_PTR(-ENOMEM);
 
+	pool->pool_type = pool_type;
 	init_llist_head(&pool->free_list);
 	init_llist_head(&pool->drop_list);
 	init_llist_head(&pool->clean_list);
@@ -251,28 +252,30 @@
 	init_waitqueue_head(&pool->flush_wait);
 	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
-	pool->fmr_attr.max_pages = fmr_message_size;
+	if (pool_type == RDS_IB_MR_1M_POOL) {
+		/* +1 allows for unaligned MRs */
+		pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1;
+		pool->max_items = RDS_FMR_1M_POOL_SIZE;
+	} else {
+		/* pool_type == RDS_IB_MR_8K_POOL */
+		pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1;
+		pool->max_items = RDS_FMR_8K_POOL_SIZE;
+	}
+
+	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
 	pool->fmr_attr.page_shift = PAGE_SHIFT;
-	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
-
-	/* We never allow more than max_items MRs to be allocated.
-	 * When we exceed more than max_items_soft, we start freeing
-	 * items more aggressively.
-	 * Make sure that max_items > max_items_soft > max_items / 2
-	 */
 	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
-	pool->max_items = rds_ibdev->max_fmrs;
 
 	return pool;
 }
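
/* Editorial sketch, not part of the patch: a compilable check of the
 * per-pool sizing above. The RDS_FMR_*_MSG_SIZE values are assumed here
 * (they are defined earlier in ib.h and may differ); the "+ 1" is the
 * unaligned-MR slack page noted in the hunk.
 */
#include <stdio.h>

#define FMR_8K_MSG_SIZE	2	/* assumed pages per 8K MR */
#define FMR_1M_MSG_SIZE	256	/* assumed pages per 1M MR */

int main(void)
{
	unsigned int pages_8k = FMR_8K_MSG_SIZE + 1;
	unsigned int items_8k = (256 / (FMR_8K_MSG_SIZE + 1)) * (8192 / 2);

	printf("8K pool: %u pages/MR, %u MRs\n", pages_8k, items_8k);
	printf("1M pool: %u pages/MR\n", (unsigned int)(FMR_1M_MSG_SIZE + 1));
	/* max_free_pinned caps cached-but-idle pages at a quarter of
	 * the pool's total pin capacity:
	 */
	printf("8K max_free_pinned: %u pages\n", items_8k * pages_8k / 4);
	return 0;
}
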
 
 void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
 {
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;
 
-	iinfo->rdma_mr_max = pool->max_items;
-	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
+	iinfo->rdma_mr_max = pool_1m->max_items;
+	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
 }
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
@@ -314,14 +317,28 @@
 	}
 }
 
-static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
+static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
+					  int npages)
 {
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+	struct rds_ib_mr_pool *pool;
 	struct rds_ib_mr *ibmr = NULL;
 	int err = 0, iter = 0;
 
+	if (npages <= RDS_FMR_8K_MSG_SIZE)
+		pool = rds_ibdev->mr_8k_pool;
+	else
+		pool = rds_ibdev->mr_1m_pool;
+
 	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		schedule_delayed_work(&pool->flush_worker, 10);
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+
+	/* Switch pools if one of the pools is reaching its upper limit */
+	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			pool = rds_ibdev->mr_1m_pool;
+		else
+			pool = rds_ibdev->mr_8k_pool;
+	}
 
 	while (1) {
 		ibmr = rds_ib_reuse_fmr(pool);
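
/* Editorial sketch, not part of the patch: the fail-over above prefers
 * the size-matched pool but spills to the other one when the preferred
 * pool is roughly 90% dirty. Stand-alone illustration of the selection:
 */
struct pool {
	int dirty_count;
	int max_items;
};

static struct pool *pick_pool(struct pool *preferred, struct pool *other)
{
	if (preferred->dirty_count >= preferred->max_items * 9 / 10)
		return other;	/* preferred pool nearly exhausted */
	return preferred;
}
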
@@ -343,12 +360,18 @@
 		atomic_dec(&pool->item_count);
 
 		if (++iter > 2) {
-			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
+			if (pool->pool_type == RDS_IB_MR_8K_POOL)
+				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
+			else
+				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
 			return ERR_PTR(-EAGAIN);
 		}
 
 		/* We do have some empty MRs. Flush them out. */
-		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
+		if (pool->pool_type == RDS_IB_MR_8K_POOL)
+			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
+		else
+			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
 		rds_ib_flush_mr_pool(pool, 0, &ibmr);
 		if (ibmr)
 			return ibmr;
@@ -373,7 +396,12 @@
 		goto out_no_cigar;
 	}
 
-	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
+	ibmr->pool = pool;
+	if (pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);
+
 	return ibmr;
 
 out_no_cigar:
@@ -429,7 +457,7 @@
 	}
 
 	page_cnt += len >> PAGE_SHIFT;
-	if (page_cnt > fmr_message_size)
+	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
 		return -EINVAL;
 
 	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
@@ -461,7 +489,10 @@
 	ibmr->sg_dma_len = sg_dma_len;
 	ibmr->remap_count++;
 
-	rds_ib_stats_inc(s_ib_rdma_mr_used);
+	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
 	ret = 0;
 
 out:
@@ -524,8 +555,7 @@
 
 	__rds_ib_teardown_mr(ibmr);
 	if (pinned) {
-		struct rds_ib_device *rds_ibdev = ibmr->device;
-		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+		struct rds_ib_mr_pool *pool = ibmr->pool;
 
 		atomic_sub(pinned, &pool->free_pinned);
 	}
@@ -594,7 +624,7 @@
  * to free as many MRs as needed to get back to this limit.
  */
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
-			        int free_all, struct rds_ib_mr **ibmr_ret)
+				int free_all, struct rds_ib_mr **ibmr_ret)
 {
 	struct rds_ib_mr *ibmr, *next;
 	struct llist_node *clean_nodes;
@@ -605,11 +635,14 @@
 	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
 	int ret = 0;
 
-	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
+	if (pool->pool_type == RDS_IB_MR_8K_POOL)
+		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
+	else
+		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);
 
 	if (ibmr_ret) {
 		DEFINE_WAIT(wait);
-		while(!mutex_trylock(&pool->flush_lock)) {
+		while (!mutex_trylock(&pool->flush_lock)) {
 			ibmr = rds_ib_reuse_fmr(pool);
 			if (ibmr) {
 				*ibmr_ret = ibmr;
@@ -666,8 +699,12 @@
 	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
 		unpinned += ibmr->sg_len;
 		__rds_ib_teardown_mr(ibmr);
-		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
-			rds_ib_stats_inc(s_ib_rdma_mr_free);
+		if (nfreed < free_goal ||
+		    ibmr->remap_count >= pool->fmr_attr.max_maps) {
+			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
+				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
+			else
+				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
 			list_del(&ibmr->unmap_list);
 			ib_dealloc_fmr(ibmr->fmr);
 			kfree(ibmr);
@@ -719,8 +756,8 @@
 void rds_ib_free_mr(void *trans_private, int invalidate)
 {
 	struct rds_ib_mr *ibmr = trans_private;
+	struct rds_ib_mr_pool *pool = ibmr->pool;
 	struct rds_ib_device *rds_ibdev = ibmr->device;
-	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
 
 	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
 
@@ -759,10 +796,11 @@
 
 	down_read(&rds_ib_devices_lock);
 	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
-		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
+		if (rds_ibdev->mr_8k_pool)
+			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);
 
-		if (pool)
-			rds_ib_flush_mr_pool(pool, 0, NULL);
+		if (rds_ibdev->mr_1m_pool)
+			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
 	}
 	up_read(&rds_ib_devices_lock);
 }
@@ -780,12 +818,12 @@
 		goto out;
 	}
 
-	if (!rds_ibdev->mr_pool) {
+	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
 		ret = -ENODEV;
 		goto out;
 	}
 
-	ibmr = rds_ib_alloc_fmr(rds_ibdev);
+	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
 	if (IS_ERR(ibmr)) {
 		rds_ib_dev_put(rds_ibdev);
 		return ibmr;
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index f43831e..96744b7 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -596,8 +596,7 @@
  * wr_id and avoids working with the ring in that case.
  */
 #ifndef KERNEL_HAS_ATOMIC64
-static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
-				int ack_required)
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 {
 	unsigned long flags;
 
@@ -622,8 +621,7 @@
 	return seq;
 }
 #else
-static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
-				int ack_required)
+void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
 {
 	atomic64_set(&ic->i_ack_next, seq);
 	if (ack_required) {
@@ -830,20 +828,6 @@
 	rds_cong_map_updated(map, uncongested);
 }
 
-/*
- * Rings are posted with all the allocations they'll need to queue the
- * incoming message to the receiving socket so this can't fail.
- * All fragments start with a header, so we can make sure we're not receiving
- * garbage, and we can tell a small 8 byte fragment from an ACK frame.
- */
-struct rds_ib_ack_state {
-	u64		ack_next;
-	u64		ack_recv;
-	unsigned int	ack_required:1;
-	unsigned int	ack_next_valid:1;
-	unsigned int	ack_recv_valid:1;
-};
-
 static void rds_ib_process_recv(struct rds_connection *conn,
 				struct rds_ib_recv_work *recv, u32 data_len,
 				struct rds_ib_ack_state *state)
@@ -969,96 +953,50 @@
 	}
 }
 
-/*
- * Plucking the oldest entry from the ring can be done concurrently with
- * the thread refilling the ring.  Each ring operation is protected by
- * spinlocks and the transient state of refilling doesn't change the
- * recording of which entry is oldest.
- *
- * This relies on IB only calling one cq comp_handler for each cq so that
- * there will only be one caller of rds_recv_incoming() per RDS connection.
- */
-void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
-{
-	struct rds_connection *conn = context;
-	struct rds_ib_connection *ic = conn->c_transport_data;
-
-	rdsdebug("conn %p cq %p\n", conn, cq);
-
-	rds_ib_stats_inc(s_ib_rx_cq_call);
-
-	tasklet_schedule(&ic->i_recv_tasklet);
-}
-
-static inline void rds_poll_cq(struct rds_ib_connection *ic,
-			       struct rds_ib_ack_state *state)
+void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
+			     struct ib_wc *wc,
+			     struct rds_ib_ack_state *state)
 {
 	struct rds_connection *conn = ic->conn;
-	struct ib_wc wc;
 	struct rds_ib_recv_work *recv;
 
-	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status,
-			 ib_wc_status_msg(wc.status), wc.byte_len,
-			 be32_to_cpu(wc.ex.imm_data));
-		rds_ib_stats_inc(s_ib_rx_cq_event);
+	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+		 (unsigned long long)wc->wr_id, wc->status,
+		 ib_wc_status_msg(wc->status), wc->byte_len,
+		 be32_to_cpu(wc->ex.imm_data));
 
-		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
+	rds_ib_stats_inc(s_ib_rx_cq_event);
+	recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
+	ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
+			DMA_FROM_DEVICE);
 
-		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
-
-		/*
-		 * Also process recvs in connecting state because it is possible
-		 * to get a recv completion _before_ the rdmacm ESTABLISHED
-		 * event is processed.
-		 */
-		if (wc.status == IB_WC_SUCCESS) {
-			rds_ib_process_recv(conn, recv, wc.byte_len, state);
-		} else {
-			/* We expect errors as the qp is drained during shutdown */
-			if (rds_conn_up(conn) || rds_conn_connecting(conn))
-				rds_ib_conn_error(conn, "recv completion on %pI4 had "
-						  "status %u (%s), disconnecting and "
-						  "reconnecting\n", &conn->c_faddr,
-						  wc.status,
-						  ib_wc_status_msg(wc.status));
-		}
-
-		/*
-		 * rds_ib_process_recv() doesn't always consume the frag, and
-		 * we might not have called it at all if the wc didn't indicate
-		 * success. We already unmapped the frag's pages, though, and
-		 * the following rds_ib_ring_free() call tells the refill path
-		 * that it will not find an allocated frag here. Make sure we
-		 * keep that promise by freeing a frag that's still on the ring.
-		 */
-		if (recv->r_frag) {
-			rds_ib_frag_free(ic, recv->r_frag);
-			recv->r_frag = NULL;
-		}
-		rds_ib_ring_free(&ic->i_recv_ring, 1);
+	/* Also process recvs in connecting state because it is possible
+	 * to get a recv completion _before_ the rdmacm ESTABLISHED
+	 * event is processed.
+	 */
+	if (wc->status == IB_WC_SUCCESS) {
+		rds_ib_process_recv(conn, recv, wc->byte_len, state);
+	} else {
+		/* We expect errors as the qp is drained during shutdown */
+		if (rds_conn_up(conn) || rds_conn_connecting(conn))
+			rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
+					  &conn->c_faddr,
+					  wc->status,
+					  ib_wc_status_msg(wc->status));
 	}
-}
 
-void rds_ib_recv_tasklet_fn(unsigned long data)
-{
-	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
-	struct rds_connection *conn = ic->conn;
-	struct rds_ib_ack_state state = { 0, };
-
-	rds_poll_cq(ic, &state);
-	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
-	rds_poll_cq(ic, &state);
-
-	if (state.ack_next_valid)
-		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
-	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
-		rds_send_drop_acked(conn, state.ack_recv, NULL);
-		ic->i_ack_recv = state.ack_recv;
+	/* rds_ib_process_recv() doesn't always consume the frag, and
+	 * we might not have called it at all if the wc didn't indicate
+	 * success. We already unmapped the frag's pages, though, and
+	 * the following rds_ib_ring_free() call tells the refill path
+	 * that it will not find an allocated frag here. Make sure we
+	 * keep that promise by freeing a frag that's still on the ring.
+	 */
+	if (recv->r_frag) {
+		rds_ib_frag_free(ic, recv->r_frag);
+		recv->r_frag = NULL;
 	}
-	if (rds_conn_up(conn))
-		rds_ib_attempt_ack(ic);
+	rds_ib_ring_free(&ic->i_recv_ring, 1);
 
 	/* If we ever end up with a really empty receive ring, we're
 	 * in deep trouble, as the sender will definitely see RNR
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 4e88047..670882c 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -195,7 +195,7 @@
 
 		send->s_op = NULL;
 
-		send->s_wr.wr_id = i;
+		send->s_wr.wr_id = i | RDS_IB_SEND_OP;
 		send->s_wr.sg_list = send->s_sge;
 		send->s_wr.ex.imm_data = 0;
 
@@ -237,81 +237,73 @@
  * unallocs the next free entry in the ring it doesn't alter which is
  * the next to be freed, which is what this is concerned with.
  */
-void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
+void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 {
-	struct rds_connection *conn = context;
-	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_message *rm = NULL;
-	struct ib_wc wc;
+	struct rds_connection *conn = ic->conn;
 	struct rds_ib_send_work *send;
 	u32 completed;
 	u32 oldest;
 	u32 i = 0;
-	int ret;
 	int nr_sig = 0;
 
-	rdsdebug("cq %p conn %p\n", cq, conn);
-	rds_ib_stats_inc(s_ib_tx_cq_call);
-	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
-	if (ret)
-		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
 
-	while (ib_poll_cq(cq, 1, &wc) > 0) {
-		rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
-			 (unsigned long long)wc.wr_id, wc.status,
-			 ib_wc_status_msg(wc.status), wc.byte_len,
-			 be32_to_cpu(wc.ex.imm_data));
-		rds_ib_stats_inc(s_ib_tx_cq_event);
+	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+		 (unsigned long long)wc->wr_id, wc->status,
+		 ib_wc_status_msg(wc->status), wc->byte_len,
+		 be32_to_cpu(wc->ex.imm_data));
+	rds_ib_stats_inc(s_ib_tx_cq_event);
 
-		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
-			if (time_after(jiffies, ic->i_ack_queued + HZ/2))
-				rds_ib_stats_inc(s_ib_tx_stalled);
-			rds_ib_ack_send_complete(ic);
-			continue;
-		}
+	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
+		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
+			rds_ib_stats_inc(s_ib_tx_stalled);
+		rds_ib_ack_send_complete(ic);
+		return;
+	}
 
-		oldest = rds_ib_ring_oldest(&ic->i_send_ring);
+	oldest = rds_ib_ring_oldest(&ic->i_send_ring);
 
-		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);
+	completed = rds_ib_ring_completed(&ic->i_send_ring,
+					  (wc->wr_id & ~RDS_IB_SEND_OP),
+					  oldest);
 
-		for (i = 0; i < completed; i++) {
-			send = &ic->i_sends[oldest];
-			if (send->s_wr.send_flags & IB_SEND_SIGNALED)
-				nr_sig++;
+	for (i = 0; i < completed; i++) {
+		send = &ic->i_sends[oldest];
+		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+			nr_sig++;
 
-			rm = rds_ib_send_unmap_op(ic, send, wc.status);
+		rm = rds_ib_send_unmap_op(ic, send, wc->status);
 
-			if (time_after(jiffies, send->s_queued + HZ/2))
-				rds_ib_stats_inc(s_ib_tx_stalled);
+		if (time_after(jiffies, send->s_queued + HZ / 2))
+			rds_ib_stats_inc(s_ib_tx_stalled);
 
-			if (send->s_op) {
-				if (send->s_op == rm->m_final_op) {
-					/* If anyone waited for this message to get flushed out, wake
-					 * them up now */
-					rds_message_unmapped(rm);
-				}
-				rds_message_put(rm);
-				send->s_op = NULL;
+		if (send->s_op) {
+			if (send->s_op == rm->m_final_op) {
+				/* If anyone waited for this message to get
+				 * flushed out, wake them up now
+				 */
+				rds_message_unmapped(rm);
 			}
-
-			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
+			rds_message_put(rm);
+			send->s_op = NULL;
 		}
 
-		rds_ib_ring_free(&ic->i_send_ring, completed);
-		rds_ib_sub_signaled(ic, nr_sig);
-		nr_sig = 0;
+		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
+	}
 
-		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
-		    test_bit(0, &conn->c_map_queued))
-			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	rds_ib_ring_free(&ic->i_send_ring, completed);
+	rds_ib_sub_signaled(ic, nr_sig);
+	nr_sig = 0;
 
-		/* We expect errors as the qp is drained during shutdown */
-		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
-			rds_ib_conn_error(conn, "send completion on %pI4 had status "
-					  "%u (%s), disconnecting and reconnecting\n",
-					  &conn->c_faddr, wc.status,
-					  ib_wc_status_msg(wc.status));
-		}
+	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
+	    test_bit(0, &conn->c_map_queued))
+		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
+	/* We expect errors as the qp is drained during shutdown */
+	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
+		rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
+				  &conn->c_faddr, wc->status,
+				  ib_wc_status_msg(wc->status));
 	}
 }
 
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 2d5965d..d77e044 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -42,14 +42,14 @@
 static const char *const rds_ib_stat_names[] = {
 	"ib_connect_raced",
 	"ib_listen_closed_stale",
-	"ib_tx_cq_call",
+	"ib_evt_handler_call",
+	"ib_tasklet_call",
 	"ib_tx_cq_event",
 	"ib_tx_ring_full",
 	"ib_tx_throttle",
 	"ib_tx_sg_mapping_failure",
 	"ib_tx_stalled",
 	"ib_tx_credit_updates",
-	"ib_rx_cq_call",
 	"ib_rx_cq_event",
 	"ib_rx_ring_empty",
 	"ib_rx_refill_from_cq",
@@ -61,12 +61,18 @@
 	"ib_ack_send_delayed",
 	"ib_ack_send_piggybacked",
 	"ib_ack_received",
-	"ib_rdma_mr_alloc",
-	"ib_rdma_mr_free",
-	"ib_rdma_mr_used",
-	"ib_rdma_mr_pool_flush",
-	"ib_rdma_mr_pool_wait",
-	"ib_rdma_mr_pool_depleted",
+	"ib_rdma_mr_8k_alloc",
+	"ib_rdma_mr_8k_free",
+	"ib_rdma_mr_8k_used",
+	"ib_rdma_mr_8k_pool_flush",
+	"ib_rdma_mr_8k_pool_wait",
+	"ib_rdma_mr_8k_pool_depleted",
+	"ib_rdma_mr_1m_alloc",
+	"ib_rdma_mr_1m_free",
+	"ib_rdma_mr_1m_used",
+	"ib_rdma_mr_1m_pool_flush",
+	"ib_rdma_mr_1m_pool_wait",
+	"ib_rdma_mr_1m_pool_depleted",
 	"ib_atomic_cswp",
 	"ib_atomic_fadd",
 };
diff --git a/net/rds/rds.h b/net/rds/rds.h
index afb4048..543c308 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -86,7 +86,9 @@
 	struct hlist_node	c_hash_node;
 	__be32			c_laddr;
 	__be32			c_faddr;
-	unsigned int		c_loopback:1;
+	unsigned int		c_loopback:1,
+				c_outgoing:1,
+				c_pad_to_32:30;
 	struct rds_connection	*c_passive;
 
 	struct rds_cong_map	*c_lcong;
@@ -603,6 +605,7 @@
 int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
 void rds_remove_bound(struct rds_sock *rs);
 struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
+void rds_bind_lock_init(void);
 
 /* cong.c */
 int rds_cong_get_maps(struct rds_connection *conn);
diff --git a/net/rds/send.c b/net/rds/send.c
index 4df61a5..827155c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -38,6 +38,7 @@
 #include <linux/list.h>
 #include <linux/ratelimit.h>
 #include <linux/export.h>
+#include <linux/sizes.h>
 
 #include "rds.h"
 
@@ -51,7 +52,7 @@
  * it to 0 will restore the old behavior (where we looped until we had
  * drained the queue).
  */
-static int send_batch_count = 64;
+static int send_batch_count = SZ_1K;
 module_param(send_batch_count, int, 0444);
 MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
 
@@ -223,7 +224,7 @@
 			 * through a lot of messages, lets back off and see
 			 * if anyone else jumps in
 			 */
-			if (batch_count >= 1024)
+			if (batch_count >= send_batch_count)
 				goto over_batch;
 
 			spin_lock_irqsave(&conn->c_lock, flags);
@@ -423,12 +424,15 @@
 		     !list_empty(&conn->c_send_queue)) &&
 		    send_gen == conn->c_send_gen) {
 			rds_stats_inc(s_send_lock_queue_raced);
-			goto restart;
+			if (batch_count < send_batch_count)
+				goto restart;
+			queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 		}
 	}
 out:
 	return ret;
 }
+EXPORT_SYMBOL_GPL(rds_send_xmit);
 
 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
 {
@@ -1120,8 +1124,9 @@
 	 */
 	rds_stats_inc(s_send_queued);
 
-	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		rds_send_xmit(conn);
+	ret = rds_send_xmit(conn);
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 	return payload_len;
@@ -1177,8 +1182,8 @@
 	rds_stats_inc(s_send_queued);
 	rds_stats_inc(s_send_pong);
 
-	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
-		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+	/* schedule the send work on rds_wq */
+	queue_delayed_work(rds_wq, &conn->c_send_w, 1);
 
 	rds_message_put(rm);
 	return 0;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index c42b60b..9d6ddba 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -67,21 +67,13 @@
 	set_fs(oldfs);
 }
 
+/* All module-specific customizations to the RDS-TCP socket should be done in
+ * rds_tcp_tune() and applied after socket creation. In general these
+ * customizations should be tunable via module_param()
+ */
 void rds_tcp_tune(struct socket *sock)
 {
-	struct sock *sk = sock->sk;
-
 	rds_tcp_nonagle(sock);
-
-	/*
-	 * We're trying to saturate gigabit with the default,
-	 * see svc_sock_setbufsize().
-	 */
-	lock_sock(sk);
-	sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
-	sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
-	sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
-	release_sock(sk);
 }
 
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 444d78d..0936a4a 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -110,28 +110,27 @@
 		goto out;
 	}
 	/* An incoming SYN request came in, and TCP just accepted it.
-	 * We always create a new conn for listen side of TCP, and do not
-	 * add it to the c_hash_list.
 	 *
 	 * If the client reboots, this conn will need to be cleaned up.
 	 * rds_tcp_state_change() will do that cleanup
 	 */
 	rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-	WARN_ON(!rs_tcp || rs_tcp->t_sock);
+	if (rs_tcp->t_sock &&
+	    ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+		struct sock *nsk = new_sock->sk;
 
-	/*
-	 * see the comment above rds_queue_delayed_reconnect()
-	 */
-	if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
-		if (rds_conn_state(conn) == RDS_CONN_UP)
-			rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-		else
-			rds_tcp_stats_inc(s_tcp_connect_raced);
-		rds_conn_drop(conn);
+		nsk->sk_user_data = NULL;
+		nsk->sk_prot->disconnect(nsk, 0);
+		tcp_done(nsk);
+		new_sock = NULL;
 		ret = 0;
 		goto out;
+	} else if (rs_tcp->t_sock) {
+		rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+		conn->c_outgoing = 0;
 	}
 
+	rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
 	rds_tcp_set_callbacks(new_sock, conn);
 	rds_connect_complete(conn);
 	new_sock = NULL;
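
/* Editorial sketch, not part of the patch: the accept path above breaks a
 * simultaneous-connect tie deterministically -- the peer with the
 * numerically smaller address keeps its outgoing socket and the incoming
 * one is torn down. Stand-alone illustration of the tie-break:
 */
#include <stdbool.h>
#include <stdint.h>

/* addresses in host byte order, as after ntohl() */
static bool reject_incoming(uint32_t local_addr, uint32_t peer_addr,
			    bool have_outgoing_sock)
{
	return have_outgoing_sock && local_addr < peer_addr;
}
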
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 53b17ca..2894e60 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -83,6 +83,7 @@
 	struct rds_tcp_connection *tc = conn->c_transport_data;
 	int done = 0;
 	int ret = 0;
+	int more;
 
 	if (hdr_off == 0) {
 		/*
@@ -116,12 +117,15 @@
 			goto out;
 	}
 
+	more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0;
 	while (sg < rm->data.op_nents) {
+		int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
+
 		ret = tc->t_sock->ops->sendpage(tc->t_sock,
 						sg_page(&rm->data.op_sg[sg]),
 						rm->data.op_sg[sg].offset + off,
 						rm->data.op_sg[sg].length - off,
-						MSG_DONTWAIT|MSG_NOSIGNAL);
+						flags);
 		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
 			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
 			 ret);
@@ -134,6 +138,8 @@
 			off = 0;
 			sg++;
 		}
+		if (sg == rm->data.op_nents - 1)
+			more = 0;
 	}
 
 out:
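
/* Editorial sketch, not part of the patch: the hunk above sets MSG_MORE
 * (plus the kernel-internal MSG_SENDPAGE_NOTLAST) on every fragment but
 * the last, so TCP coalesces them instead of emitting a segment per sg
 * entry. A compilable userspace analogue of the same batching with send():
 */
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_chunks(int fd, const void *bufs[], const size_t lens[],
			   int n)
{
	ssize_t total = 0;
	int i;

	for (i = 0; i < n; i++) {
		int more = (i < n - 1) ? MSG_MORE : 0;	/* not the last chunk */
		ssize_t ret = send(fd, bufs[i], lens[i],
				   MSG_DONTWAIT | MSG_NOSIGNAL | more);

		if (ret < 0)
			return ret;
		total += ret;
	}
	return total;
}
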
diff --git a/net/rds/threads.c b/net/rds/threads.c
index dc2402e..454aa6d 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -162,7 +162,9 @@
 	int ret;
 
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
+		clear_bit(RDS_LL_SEND_FULL, &conn->c_flags);
 		ret = rds_send_xmit(conn);
+		cond_resched();
 		rdsdebug("conn %p ret %d\n", conn, ret);
 		switch (ret) {
 		case -EAGAIN:
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2d1be4a..32fcdec 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -31,13 +31,17 @@
 
 #define MIRRED_TAB_MASK     7
 static LIST_HEAD(mirred_list);
+static DEFINE_SPINLOCK(mirred_list_lock);
 
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
 	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
 
+	/* We could be called either in an RCU callback or with RTNL lock held. */
+	spin_lock_bh(&mirred_list_lock);
 	list_del(&m->tcfm_list);
+	spin_unlock_bh(&mirred_list_lock);
 	if (dev)
 		dev_put(dev);
 }
@@ -103,10 +107,10 @@
 	} else {
 		if (bind)
 			return 0;
-		if (!ovr) {
-			tcf_hash_release(a, bind);
+
+		tcf_hash_release(a, bind);
+		if (!ovr)
 			return -EEXIST;
-		}
 	}
 	m = to_mirred(a);
 
@@ -123,7 +127,9 @@
 	}
 
 	if (ret == ACT_P_CREATED) {
+		spin_lock_bh(&mirred_list_lock);
 		list_add(&m->tcfm_list, &mirred_list);
+		spin_unlock_bh(&mirred_list_lock);
 		tcf_hash_insert(a);
 	}
 
@@ -173,6 +179,7 @@
 
 	skb2->skb_iif = skb->dev->ifindex;
 	skb2->dev = dev;
+	skb_sender_cpu_clear(skb2);
 	err = dev_queue_xmit(skb2);
 
 	if (err) {
@@ -221,7 +228,8 @@
 	struct tcf_mirred *m;
 
 	ASSERT_RTNL();
-	if (event == NETDEV_UNREGISTER)
+	if (event == NETDEV_UNREGISTER) {
+		spin_lock_bh(&mirred_list_lock);
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
 			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
@@ -231,6 +239,8 @@
 				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
 		}
+		spin_unlock_bh(&mirred_list_lock);
+	}
 
 	return NOTIFY_DONE;
 }
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 7eeffaf6..5faaa54 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -262,7 +262,8 @@
 	return 0;
 }
 
-static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog)
+static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
+				 const struct tcf_proto *tp)
 {
 	struct bpf_prog *fp;
 	char *name = NULL;
@@ -294,6 +295,9 @@
 	prog->bpf_name = name;
 	prog->filter = fp;
 
+	if (fp->dst_needed)
+		netif_keep_dst(qdisc_dev(tp->q));
+
 	return 0;
 }
 
@@ -330,7 +334,7 @@
 	prog->exts_integrated = have_exts;
 
 	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
-		       cls_bpf_prog_from_efd(tb, prog);
+		       cls_bpf_prog_from_efd(tb, prog, tp);
 	if (ret < 0) {
 		tcf_exts_destroy(&exts);
 		return ret;
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 094a874..3fee70d 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -11,7 +11,7 @@
  * Note: Quantum tunneling is not supported.
  */
 
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
@@ -37,17 +37,8 @@
 	.owner		= THIS_MODULE,
 };
 
-static int __init blackhole_module_init(void)
+static int __init blackhole_init(void)
 {
 	return register_qdisc(&blackhole_qdisc_ops);
 }
-
-static void __exit blackhole_module_exit(void)
-{
-	unregister_qdisc(&blackhole_qdisc_ops);
-}
-
-module_init(blackhole_module_init)
-module_exit(blackhole_module_exit)
-
-MODULE_LICENSE("GPL");
+device_initcall(blackhole_init)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index f377702..109b232 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -224,13 +224,16 @@
 	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
 		return &q->internal;
 
-	/* SYNACK messages are attached to a listener socket.
-	 * 1) They are not part of a 'flow' yet
-	 * 2) We do not want to rate limit them (eg SYNFLOOD attack),
+	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
+	 * or a listener (SYNCOOKIE mode)
+	 * 1) request sockets are not full blown,
+	 *    they do not contain sk_pacing_rate
+	 * 2) They are not part of a 'flow' yet
+	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
 	 *    especially if the listener set SO_MAX_PACING_RATE
-	 * 3) We pretend they are orphaned
+	 * 4) We pretend they are orphaned
 	 */
-	if (!sk || sk->sk_state == TCP_LISTEN) {
+	if (!sk || sk_listener(sk)) {
 		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
 
 		/* By forcing low order bit to 1, we make sure to not
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 9d15cb6..86b04e3 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -368,6 +368,15 @@
 	return bucket - q->buckets;
 }
 
+static unsigned int hhf_qdisc_drop(struct Qdisc *sch)
+{
+	unsigned int prev_backlog;
+
+	prev_backlog = sch->qstats.backlog;
+	hhf_drop(sch);
+	return prev_backlog - sch->qstats.backlog;
+}
+
 static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct hhf_sched_data *q = qdisc_priv(sch);
@@ -696,7 +705,7 @@
 	.enqueue	=	hhf_enqueue,
 	.dequeue	=	hhf_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.drop		=	hhf_drop,
+	.drop		=	hhf_qdisc_drop,
 	.init		=	hhf_init,
 	.reset		=	hhf_reset,
 	.destroy	=	hhf_destroy,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 197c3f5..b00f1f9 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1208,20 +1208,22 @@
  *   within this document.
  *
  * Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score() e.g., if no such
  * transport with state SCTP_ACTIVE exists, round-robin through
  * SCTP_UNKNOWN, etc. You get the picture.
  */
-static const u8 sctp_trans_state_to_prio_map[] = {
-	[SCTP_ACTIVE]	= 3,	/* best case */
-	[SCTP_UNKNOWN]	= 2,
-	[SCTP_PF]	= 1,
-	[SCTP_INACTIVE] = 0,	/* worst case */
-};
-
 static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-	return sctp_trans_state_to_prio_map[trans->state];
+	switch (trans->state) {
+	case SCTP_ACTIVE:
+		return 3;	/* best case */
+	case SCTP_UNKNOWN:
+		return 2;
+	case SCTP_PF:
+		return 1;
+	default: /* case SCTP_INACTIVE */
+		return 0;	/* worst case */
+	}
 }
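
/* Editorial sketch, not part of the patch: the switch above is safer than
 * the old lookup table because an unexpected state value falls through to
 * the worst score instead of indexing past the array. Compilable
 * illustration with stand-in state values:
 */
#include <stdio.h>

enum trans_state { ACTIVE, UNKNOWN, PF, INACTIVE };

static unsigned char score(enum trans_state s)
{
	switch (s) {
	case ACTIVE:	return 3;	/* best case */
	case UNKNOWN:	return 2;
	case PF:	return 1;
	default:	return 0;	/* worst case, incl. bogus input */
	}
}

int main(void)
{
	printf("%u %u\n", score(ACTIVE), score((enum trans_state)42));
	return 0;
}
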
 
 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 7954c52..763e06a 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2494,7 +2494,7 @@
 	__u16 sat;
 	int retval = 1;
 	sctp_scope_t scope;
-	time_t stale;
+	u32 stale;
 	struct sctp_af *af;
 	union sctp_addr_param *addr_param;
 	struct sctp_transport *t;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 35df126..6098d4c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -244,12 +244,13 @@
 	int error;
 	struct sctp_transport *transport = (struct sctp_transport *) peer;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
 	/* Check whether a task is in the sock.  */
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -272,10 +273,10 @@
 			   transport, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
 					sctp_event_timeout_t timeout_type)
 {
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 	int error = 0;
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy: timer %d\n", __func__,
 			 timeout_type);
 
@@ -312,10 +314,10 @@
 			   (void *)timeout_type, GFP_ATOMIC);
 
 	if (error)
-		asoc->base.sk->sk_err = -error;
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@
 	int error = 0;
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -388,11 +391,11 @@
 			   asoc->state, asoc->ep, asoc,
 			   transport, GFP_ATOMIC);
 
-	 if (error)
-		 asoc->base.sk->sk_err = -error;
+	if (error)
+		sk->sk_err = -error;
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@
 {
 	struct sctp_transport *transport = (struct sctp_transport *) data;
 	struct sctp_association *asoc = transport->asoc;
-	struct net *net = sock_net(asoc->base.sk);
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
 
-	bh_lock_sock(asoc->base.sk);
-	if (sock_owned_by_user(asoc->base.sk)) {
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
 		pr_debug("%s: sock is busy\n", __func__);
 
 		/* Try again later.  */
@@ -427,7 +431,7 @@
 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-	bh_unlock_sock(asoc->base.sk);
+	bh_unlock_sock(sk);
 	sctp_association_put(asoc);
 }
 
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d7eaa73..6f46aa1 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2306,7 +2306,7 @@
 						 sctp_cmd_seq_t *commands)
 {
 	struct sctp_chunk *chunk = arg;
-	time_t stale;
+	u32 stale;
 	sctp_cookie_preserve_param_t bht;
 	sctp_errhdr_t *err;
 	struct sctp_chunk *reply;
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index cb25c89..f1e8daf 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -39,25 +39,6 @@
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 	    struct rpcrdma_create_data_internal *cdata)
 {
-	struct ib_device_attr *devattr = &ia->ri_devattr;
-	struct ib_mr *mr;
-
-	/* Obtain an lkey to use for the regbufs, which are
-	 * protected from remote access.
-	 */
-	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
-		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-	} else {
-		mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(mr)) {
-			pr_err("%s: ib_get_dma_mr for failed with %lX\n",
-			       __func__, PTR_ERR(mr));
-			return -ENOMEM;
-		}
-		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-		ia->ri_dma_mr = mr;
-	}
-
 	return 0;
 }
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index d6653f5..5318951 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -189,11 +189,6 @@
 	struct ib_device_attr *devattr = &ia->ri_devattr;
 	int depth, delta;
 
-	/* Obtain an lkey to use for the regbufs, which are
-	 * protected from remote access.
-	 */
-	ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-
 	ia->ri_max_frmr_depth =
 			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
 			      devattr->max_fast_reg_page_list_len);
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 72cf8b1..617b76f 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -23,7 +23,6 @@
 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
 		 struct rpcrdma_create_data_internal *cdata)
 {
-	struct ib_device_attr *devattr = &ia->ri_devattr;
 	struct ib_mr *mr;
 
 	/* Obtain an rkey to use for RPC data payloads.
@@ -37,15 +36,8 @@
 		       __func__, PTR_ERR(mr));
 		return -ENOMEM;
 	}
+
 	ia->ri_dma_mr = mr;
-
-	/* Obtain an lkey to use for regbufs.
-	 */
-	if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
-		ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-	else
-		ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-
 	return 0;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cb51742..f0c3ff6 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -136,7 +136,8 @@
 	ctxt->direction = DMA_FROM_DEVICE;
 	ctxt->read_hdr = head;
 	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+		     rs_length);
 
 	for (pno = 0; pno < pages_needed; pno++) {
 		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
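
/* Editorial sketch, not part of the patch: a quick numeric check of the
 * length fix above. With a nonzero page offset, the old expression could
 * over-count the bytes that actually fit in the mapped pages. The example
 * values below are assumed:
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	int pages_needed = 2, page_offset = 512, rs_length = 9000;
	int old_cap = pages_needed << PAGE_SHIFT;			/* 8192 */
	int new_cap = (pages_needed << PAGE_SHIFT) - page_offset;	/* 7680 */

	printf("old read: %d, fixed read: %d\n",
	       old_cap < rs_length ? old_cap : rs_length,
	       new_cap < rs_length ? new_cap : rs_length);
	return 0;
}
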
@@ -235,7 +236,8 @@
 	ctxt->direction = DMA_FROM_DEVICE;
 	ctxt->frmr = frmr;
 	pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+		     rs_length);
 
 	frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
 	frmr->direction = DMA_FROM_DEVICE;
@@ -531,7 +533,7 @@
 	rqstp->rq_arg.page_base = head->arg.page_base;
 
 	/* rq_respages starts after the last arg page */
-	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
+	rqstp->rq_respages = &rqstp->rq_pages[page_no];
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
 	/* Rebuild rq_arg head and tail. */
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 64443eb..41e452b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -270,8 +270,8 @@
 
 	xprt_clear_connected(xprt);
 
-	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
 	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
 	rpcrdma_ia_close(&r_xprt->rx_ia);
 
 	xprt_rdma_free_addresses(xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 6829967..5502d4d 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -543,11 +543,8 @@
 	}
 
 	if (memreg == RPCRDMA_FRMR) {
-		/* Requires both frmr reg and local dma lkey */
-		if (((devattr->device_cap_flags &
-		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
-		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) ||
-		      (devattr->max_fast_reg_page_list_len == 0)) {
+		if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+		    (devattr->max_fast_reg_page_list_len == 0)) {
 			dprintk("RPC:       %s: FRMR registration "
 				"not supported by HCA\n", __func__);
 			memreg = RPCRDMA_MTHCAFMR;
@@ -557,6 +554,7 @@
 		if (!ia->ri_device->alloc_fmr) {
 			dprintk("RPC:       %s: MTHCAFMR registration "
 				"not supported by HCA\n", __func__);
+			rc = -EINVAL;
 			goto out3;
 		}
 	}
@@ -755,19 +753,22 @@
 
 	cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-	if (ia->ri_id->qp) {
+	if (ia->ri_id->qp)
 		rpcrdma_ep_disconnect(ep, ia);
+
+	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+	rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+	if (ia->ri_id->qp) {
 		rdma_destroy_qp(ia->ri_id);
 		ia->ri_id->qp = NULL;
 	}
 
-	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
 	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
 	if (rc)
 		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
 			__func__, rc);
 
-	rpcrdma_clean_cq(ep->rep_attr.send_cq);
 	rc = ib_destroy_cq(ep->rep_attr.send_cq);
 	if (rc)
 		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
@@ -1252,7 +1253,7 @@
 		goto out_free;
 
 	iov->length = size;
-	iov->lkey = ia->ri_dma_lkey;
+	iov->lkey = ia->ri_pd->local_dma_lkey;
 	rb->rg_size = size;
 	rb->rg_owner = NULL;
 	return rb;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0251222..c09414e 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,7 +65,6 @@
 	struct rdma_cm_id 	*ri_id;
 	struct ib_pd		*ri_pd;
 	struct ib_mr		*ri_dma_mr;
-	u32			ri_dma_lkey;
 	struct completion	ri_done;
 	int			ri_async_rc;
 	unsigned int		ri_max_frmr_depth;
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 00ee547..6dfd19e 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -15,8 +15,11 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/if_bridge.h>
 #include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/if_vlan.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -92,6 +95,85 @@
 	switchdev_trans_items_destroy(trans);
 }
 
+static LIST_HEAD(deferred);
+static DEFINE_SPINLOCK(deferred_lock);
+
+typedef void switchdev_deferred_func_t(struct net_device *dev,
+				       const void *data);
+
+struct switchdev_deferred_item {
+	struct list_head list;
+	struct net_device *dev;
+	switchdev_deferred_func_t *func;
+	unsigned long data[0];
+};
+
+static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
+{
+	struct switchdev_deferred_item *dfitem;
+
+	spin_lock_bh(&deferred_lock);
+	if (list_empty(&deferred)) {
+		dfitem = NULL;
+		goto unlock;
+	}
+	dfitem = list_first_entry(&deferred,
+				  struct switchdev_deferred_item, list);
+	list_del(&dfitem->list);
+unlock:
+	spin_unlock_bh(&deferred_lock);
+	return dfitem;
+}
+
+/**
+ *	switchdev_deferred_process - Process ops in deferred queue
+ *
+ *	Called to flush the ops currently queued in the deferred ops queue.
+ *	rtnl_lock must be held.
+ */
+void switchdev_deferred_process(void)
+{
+	struct switchdev_deferred_item *dfitem;
+
+	ASSERT_RTNL();
+
+	while ((dfitem = switchdev_deferred_dequeue())) {
+		dfitem->func(dfitem->dev, dfitem->data);
+		dev_put(dfitem->dev);
+		kfree(dfitem);
+	}
+}
+EXPORT_SYMBOL_GPL(switchdev_deferred_process);
+
+static void switchdev_deferred_process_work(struct work_struct *work)
+{
+	rtnl_lock();
+	switchdev_deferred_process();
+	rtnl_unlock();
+}
+
+static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
+
+static int switchdev_deferred_enqueue(struct net_device *dev,
+				      const void *data, size_t data_len,
+				      switchdev_deferred_func_t *func)
+{
+	struct switchdev_deferred_item *dfitem;
+
+	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
+	if (!dfitem)
+		return -ENOMEM;
+	dfitem->dev = dev;
+	dfitem->func = func;
+	memcpy(dfitem->data, data, data_len);
+	dev_hold(dev);
+	spin_lock_bh(&deferred_lock);
+	list_add_tail(&dfitem->list, &deferred);
+	spin_unlock_bh(&deferred_lock);
+	schedule_work(&deferred_process_work);
+	return 0;
+}
+
 /**
  *	switchdev_port_attr_get - Get port attribute
  *
@@ -104,7 +186,7 @@
 	struct net_device *lower_dev;
 	struct list_head *iter;
 	struct switchdev_attr first = {
-		.id = SWITCHDEV_ATTR_UNDEFINED
+		.id = SWITCHDEV_ATTR_ID_UNDEFINED
 	};
 	int err = -EOPNOTSUPP;
 
@@ -124,7 +206,7 @@
 		err = switchdev_port_attr_get(lower_dev, attr);
 		if (err)
 			break;
-		if (first.id == SWITCHDEV_ATTR_UNDEFINED)
+		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
 			first = *attr;
 		else if (memcmp(&first, attr, sizeof(*attr)))
 			return -ENODATA;
@@ -135,7 +217,7 @@
 EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
 
 static int __switchdev_port_attr_set(struct net_device *dev,
-				     struct switchdev_attr *attr,
+				     const struct switchdev_attr *attr,
 				     struct switchdev_trans *trans)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
@@ -147,7 +229,7 @@
 		return ops->switchdev_port_attr_set(dev, attr, trans);
 
 	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
-		return err;
+		goto done;
 
 	/* Switch device port(s) may be stacked under
 	 * bond/team/vlan dev, so recurse down to set attr on
@@ -156,81 +238,26 @@
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 		err = __switchdev_port_attr_set(lower_dev, attr, trans);
+		if (err == -EOPNOTSUPP &&
+		    attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
+			continue;
 		if (err)
 			break;
 	}
 
+done:
+	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
+		err = 0;
+
 	return err;
 }
 
-struct switchdev_attr_set_work {
-	struct work_struct work;
-	struct net_device *dev;
-	struct switchdev_attr attr;
-};
-
-static void switchdev_port_attr_set_work(struct work_struct *work)
-{
-	struct switchdev_attr_set_work *asw =
-		container_of(work, struct switchdev_attr_set_work, work);
-	int err;
-
-	rtnl_lock();
-	err = switchdev_port_attr_set(asw->dev, &asw->attr);
-	if (err && err != -EOPNOTSUPP)
-		netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
-			   err, asw->attr.id);
-	rtnl_unlock();
-
-	dev_put(asw->dev);
-	kfree(work);
-}
-
-static int switchdev_port_attr_set_defer(struct net_device *dev,
-					 struct switchdev_attr *attr)
-{
-	struct switchdev_attr_set_work *asw;
-
-	asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
-	if (!asw)
-		return -ENOMEM;
-
-	INIT_WORK(&asw->work, switchdev_port_attr_set_work);
-
-	dev_hold(dev);
-	asw->dev = dev;
-	memcpy(&asw->attr, attr, sizeof(asw->attr));
-
-	schedule_work(&asw->work);
-
-	return 0;
-}
-
-/**
- *	switchdev_port_attr_set - Set port attribute
- *
- *	@dev: port device
- *	@attr: attribute to set
- *
- *	Use a 2-phase prepare-commit transaction model to ensure
- *	system is not left in a partially updated state due to
- *	failure from driver/device.
- */
-int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
+static int switchdev_port_attr_set_now(struct net_device *dev,
+				       const struct switchdev_attr *attr)
 {
 	struct switchdev_trans trans;
 	int err;
 
-	if (!rtnl_is_locked()) {
-		/* Running prepare-commit transaction across stacked
-		 * devices requires nothing moves, so if rtnl_lock is
-		 * not held, schedule a worker thread to hold rtnl_lock
-		 * while setting attr.
-		 */
-
-		return switchdev_port_attr_set_defer(dev, attr);
-	}
-
 	switchdev_trans_init(&trans);
 
 	/* Phase I: prepare for attr set. Driver/device should fail
@@ -267,10 +294,66 @@
 
 	return err;
 }
+
+static void switchdev_port_attr_set_deferred(struct net_device *dev,
+					     const void *data)
+{
+	const struct switchdev_attr *attr = data;
+	int err;
+
+	err = switchdev_port_attr_set_now(dev, attr);
+	if (err && err != -EOPNOTSUPP)
+		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
+			   err, attr->id);
+}
+
+static int switchdev_port_attr_set_defer(struct net_device *dev,
+					 const struct switchdev_attr *attr)
+{
+	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
+					  switchdev_port_attr_set_deferred);
+}
+
+/**
+ *	switchdev_port_attr_set - Set port attribute
+ *
+ *	@dev: port device
+ *	@attr: attribute to set
+ *
+ *	Use a 2-phase prepare-commit transaction model to ensure
+ *	system is not left in a partially updated state due to
+ *	failure from driver/device.
+ *
+ *	rtnl_lock must be held and the caller must not be in an atomic
+ *	section when the SWITCHDEV_F_DEFER flag is not set.
+ */
+int switchdev_port_attr_set(struct net_device *dev,
+			    const struct switchdev_attr *attr)
+{
+	if (attr->flags & SWITCHDEV_F_DEFER)
+		return switchdev_port_attr_set_defer(dev, attr);
+	ASSERT_RTNL();
+	return switchdev_port_attr_set_now(dev, attr);
+}
 EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
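
/* Editorial sketch, not part of the patch: illustrative caller-side use of
 * the new deferred path. The attribute id and value below are assumed for
 * the example; with SWITCHDEV_F_DEFER set, the call may come from atomic
 * context and the actual set runs later under rtnl_lock via
 * switchdev_deferred_process().
 */
#include <linux/if_bridge.h>
#include <net/switchdev.h>

static int port_set_stp_deferred(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,	/* assumed id */
		.flags = SWITCHDEV_F_DEFER,
		.u.stp_state = BR_STATE_FORWARDING,
	};

	return switchdev_port_attr_set(dev, &attr);
}
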
 
+static size_t switchdev_obj_size(const struct switchdev_obj *obj)
+{
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		return sizeof(struct switchdev_obj_port_vlan);
+	case SWITCHDEV_OBJ_ID_IPV4_FIB:
+		return sizeof(struct switchdev_obj_ipv4_fib);
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		return sizeof(struct switchdev_obj_port_fdb);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
 static int __switchdev_port_obj_add(struct net_device *dev,
-				    struct switchdev_obj *obj,
+				    const struct switchdev_obj *obj,
 				    struct switchdev_trans *trans)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
@@ -295,19 +378,8 @@
 	return err;
 }
 
-/**
- *	switchdev_port_obj_add - Add port object
- *
- *	@dev: port device
- *	@obj: object to add
- *
- *	Use a 2-phase prepare-commit transaction model to ensure
- *	system is not left in a partially updated state due to
- *	failure from driver/device.
- *
- *	rtnl_lock must be held.
- */
-int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
+static int switchdev_port_obj_add_now(struct net_device *dev,
+				      const struct switchdev_obj *obj)
 {
 	struct switchdev_trans trans;
 	int err;
@@ -349,15 +421,52 @@
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
+
+static void switchdev_port_obj_add_deferred(struct net_device *dev,
+					    const void *data)
+{
+	const struct switchdev_obj *obj = data;
+	int err;
+
+	err = switchdev_port_obj_add_now(dev, obj);
+	if (err && err != -EOPNOTSUPP)
+		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
+			   err, obj->id);
+}
+
+static int switchdev_port_obj_add_defer(struct net_device *dev,
+					const struct switchdev_obj *obj)
+{
+	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
+					  switchdev_port_obj_add_deferred);
+}
 
 /**
- *	switchdev_port_obj_del - Delete port object
+ *	switchdev_port_obj_add - Add port object
  *
  *	@dev: port device
- *	@obj: object to delete
+ *	@obj: object to add
+ *
+ *	Use a 2-phase prepare-commit transaction model to ensure
+ *	system is not left in a partially updated state due to
+ *	failure from driver/device.
+ *
+ *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
+ *	and the caller must not be in an atomic section.
  */
-int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
+int switchdev_port_obj_add(struct net_device *dev,
+			   const struct switchdev_obj *obj)
+{
+	if (obj->flags & SWITCHDEV_F_DEFER)
+		return switchdev_port_obj_add_defer(dev, obj);
+	ASSERT_RTNL();
+	return switchdev_port_obj_add_now(dev, obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
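The same deferral flag works for objects, since struct switchdev_obj carries its own flags field. A sketch under the same assumptions -- adding VLAN 100 as an untagged PVID, deferred so the call may be issued without rtnl_lock:

/* Hypothetical caller: deferred add of VLAN 100 as untagged PVID */
static int port_vlan_100_add(struct net_device *dev)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.obj.flags = SWITCHDEV_F_DEFER,
		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		.vid_begin = 100,
		.vid_end = 100,
	};

	return switchdev_port_obj_add(dev, &vlan.obj);
}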
+
+static int switchdev_port_obj_del_now(struct net_device *dev,
+				      const struct switchdev_obj *obj)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
@@ -373,30 +482,75 @@
 	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_del(lower_dev, obj);
+		err = switchdev_port_obj_del_now(lower_dev, obj);
 		if (err)
 			break;
 	}
 
 	return err;
 }
+
+static void switchdev_port_obj_del_deferred(struct net_device *dev,
+					    const void *data)
+{
+	const struct switchdev_obj *obj = data;
+	int err;
+
+	err = switchdev_port_obj_del_now(dev, obj);
+	if (err && err != -EOPNOTSUPP)
+		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
+			   err, obj->id);
+}
+
+static int switchdev_port_obj_del_defer(struct net_device *dev,
+					const struct switchdev_obj *obj)
+{
+	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
+					  switchdev_port_obj_del_deferred);
+}
+
+/**
+ *	switchdev_port_obj_del - Delete port object
+ *
+ *	@dev: port device
+ *	@obj: object to delete
+ *
+ *	If the SWITCHDEV_F_DEFER flag is not set, rtnl_lock must be held
+ *	and the caller must not be in an atomic section.
+ */
+int switchdev_port_obj_del(struct net_device *dev,
+			   const struct switchdev_obj *obj)
+{
+	if (obj->flags & SWITCHDEV_F_DEFER)
+		return switchdev_port_obj_del_defer(dev, obj);
+	ASSERT_RTNL();
+	return switchdev_port_obj_del_now(dev, obj);
+}
 EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 
 /**
  *	switchdev_port_obj_dump - Dump port objects
  *
  *	@dev: port device
  *	@obj: object to dump
+ *	@cb: function to call with a filled object
+ *
+ *	rtnl_lock must be held.
  */
-int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
+			    switchdev_obj_dump_cb_t *cb)
 {
 	const struct switchdev_ops *ops = dev->switchdev_ops;
 	struct net_device *lower_dev;
 	struct list_head *iter;
 	int err = -EOPNOTSUPP;
 
+	ASSERT_RTNL();
+
 	if (ops && ops->switchdev_port_obj_dump)
-		return ops->switchdev_port_obj_dump(dev, obj);
+		return ops->switchdev_port_obj_dump(dev, obj, cb);
 
 	/* Switch device port(s) may be stacked under
 	 * bond/team/vlan dev, so recurse down to dump objects on
@@ -404,7 +558,7 @@
 	 */
 
 	netdev_for_each_lower_dev(dev, lower_dev, iter) {
-		err = switchdev_port_obj_dump(lower_dev, obj);
+		err = switchdev_port_obj_dump(lower_dev, obj, cb);
 		break;
 	}
 
@@ -476,7 +630,7 @@
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 
 struct switchdev_vlan_dump {
-	struct switchdev_obj obj;
+	struct switchdev_obj_port_vlan vlan;
 	struct sk_buff *skb;
 	u32 filter_mask;
 	u16 flags;
@@ -484,8 +638,7 @@
 	u16 end;
 };
 
-static int switchdev_port_vlan_dump_put(struct net_device *dev,
-					struct switchdev_vlan_dump *dump)
+static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
 {
 	struct bridge_vlan_info vinfo;
 
@@ -515,12 +668,11 @@
 	return 0;
 }
 
-static int switchdev_port_vlan_dump_cb(struct net_device *dev,
-				       struct switchdev_obj *obj)
+static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
 {
+	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
 	struct switchdev_vlan_dump *dump =
-		container_of(obj, struct switchdev_vlan_dump, obj);
-	struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
+		container_of(vlan, struct switchdev_vlan_dump, vlan);
 	int err = 0;
 
 	if (vlan->vid_begin > vlan->vid_end)
@@ -531,7 +683,7 @@
 		for (dump->begin = dump->end = vlan->vid_begin;
 		     dump->begin <= vlan->vid_end;
 		     dump->begin++, dump->end++) {
-			err = switchdev_port_vlan_dump_put(dev, dump);
+			err = switchdev_port_vlan_dump_put(dump);
 			if (err)
 				return err;
 		}
@@ -543,7 +695,7 @@
 				/* prepend */
 				dump->begin = vlan->vid_begin;
 			} else {
-				err = switchdev_port_vlan_dump_put(dev, dump);
+				err = switchdev_port_vlan_dump_put(dump);
 				dump->flags = vlan->flags;
 				dump->begin = vlan->vid_begin;
 				dump->end = vlan->vid_end;
@@ -555,7 +707,7 @@
 				/* append */
 				dump->end = vlan->vid_end;
 			} else {
-				err = switchdev_port_vlan_dump_put(dev, dump);
+				err = switchdev_port_vlan_dump_put(dump);
 				dump->flags = vlan->flags;
 				dump->begin = vlan->vid_begin;
 				dump->end = vlan->vid_end;
@@ -572,10 +724,7 @@
 				    u32 filter_mask)
 {
 	struct switchdev_vlan_dump dump = {
-		.obj = {
-			.id = SWITCHDEV_OBJ_PORT_VLAN,
-			.cb = switchdev_port_vlan_dump_cb,
-		},
+		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 		.skb = skb,
 		.filter_mask = filter_mask,
 	};
@@ -583,12 +732,13 @@
 
 	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
 	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
-		err = switchdev_port_obj_dump(dev, &dump.obj);
+		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
+					      switchdev_port_vlan_dump_cb);
 		if (err)
 			goto err_out;
 		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
 			/* last one */
-			err = switchdev_port_vlan_dump_put(dev, &dump);
+			err = switchdev_port_vlan_dump_put(&dump);
 	}
 
 err_out:
@@ -608,10 +758,10 @@
 				  int nlflags)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 	};
 	u16 mode = BRIDGE_MODE_UNDEF;
-	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
 	int err;
 
 	err = switchdev_port_attr_get(dev, &attr);
@@ -629,7 +779,7 @@
 				     unsigned long brport_flag)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 	};
 	u8 flag = nla_get_u8(nlattr);
 	int err;
@@ -682,6 +832,9 @@
 			err = switchdev_port_br_setflag(dev, attr,
 							BR_LEARNING_SYNC);
 			break;
+		case IFLA_BRPORT_UNICAST_FLOOD:
+			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
+			break;
 		default:
 			err = -EOPNOTSUPP;
 			break;
@@ -696,14 +849,13 @@
 static int switchdev_port_br_afspec(struct net_device *dev,
 				    struct nlattr *afspec,
 				    int (*f)(struct net_device *dev,
-					     struct switchdev_obj *obj))
+					     const struct switchdev_obj *obj))
 {
 	struct nlattr *attr;
 	struct bridge_vlan_info *vinfo;
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_VLAN,
+	struct switchdev_obj_port_vlan vlan = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 	};
-	struct switchdev_obj_vlan *vlan = &obj.u.vlan;
 	int rem;
 	int err;
 
@@ -713,30 +865,35 @@
 		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
 			return -EINVAL;
 		vinfo = nla_data(attr);
-		vlan->flags = vinfo->flags;
+		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+			return -EINVAL;
+		vlan.flags = vinfo->flags;
 		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
-			if (vlan->vid_begin)
+			if (vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_begin = vinfo->vid;
+			vlan.vid_begin = vinfo->vid;
+			/* don't allow range of pvids */
+			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
+				return -EINVAL;
 		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
-			if (!vlan->vid_begin)
+			if (!vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_end = vinfo->vid;
-			if (vlan->vid_end <= vlan->vid_begin)
+			vlan.vid_end = vinfo->vid;
+			if (vlan.vid_end <= vlan.vid_begin)
 				return -EINVAL;
-			err = f(dev, &obj);
+			err = f(dev, &vlan.obj);
 			if (err)
 				return err;
-			memset(vlan, 0, sizeof(*vlan));
+			vlan.vid_begin = 0;
 		} else {
-			if (vlan->vid_begin)
+			if (vlan.vid_begin)
 				return -EINVAL;
-			vlan->vid_begin = vinfo->vid;
-			vlan->vid_end = vinfo->vid;
-			err = f(dev, &obj);
+			vlan.vid_begin = vinfo->vid;
+			vlan.vid_end = vinfo->vid;
+			err = f(dev, &vlan.obj);
 			if (err)
 				return err;
-			memset(vlan, 0, sizeof(*vlan));
+			vlan.vid_begin = 0;
 		}
 	}
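To make the parsing above concrete: a VLAN range arrives as two consecutive bridge_vlan_info attributes, which the loop folds into a single switchdev_obj_port_vlan before calling f(). An illustrative payload (not from the patch) for VLANs 10-20:

/* Illustrative afspec payload encoding the range 10-20. The loop
 * above folds these into one object with vid_begin = 10 and
 * vid_end = 20; a PVID flag on the RANGE_BEGIN attribute would be
 * rejected, since a range of pvids is not allowed.
 */
struct bridge_vlan_info range[] = {
	{ .flags = BRIDGE_VLAN_INFO_RANGE_BEGIN, .vid = 10 },
	{ .flags = BRIDGE_VLAN_INFO_RANGE_END,   .vid = 20 },
};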
 
@@ -818,15 +975,13 @@
 			   struct net_device *dev, const unsigned char *addr,
 			   u16 vid, u16 nlm_flags)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = addr,
-			.vid = vid,
-		},
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.vid = vid,
 	};
 
-	return switchdev_port_obj_add(dev, &obj);
+	ether_addr_copy(fdb.addr, addr);
+	return switchdev_port_obj_add(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
 
@@ -845,30 +1000,29 @@
 			   struct net_device *dev, const unsigned char *addr,
 			   u16 vid)
 {
-	struct switchdev_obj obj = {
-		.id = SWITCHDEV_OBJ_PORT_FDB,
-		.u.fdb = {
-			.addr = addr,
-			.vid = vid,
-		},
+	struct switchdev_obj_port_fdb fdb = {
+		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.vid = vid,
 	};
 
-	return switchdev_port_obj_del(dev, &obj);
+	ether_addr_copy(fdb.addr, addr);
+	return switchdev_port_obj_del(dev, &fdb.obj);
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
 
 struct switchdev_fdb_dump {
-	struct switchdev_obj obj;
+	struct switchdev_obj_port_fdb fdb;
+	struct net_device *dev;
 	struct sk_buff *skb;
 	struct netlink_callback *cb;
 	int idx;
 };
 
-static int switchdev_port_fdb_dump_cb(struct net_device *dev,
-				      struct switchdev_obj *obj)
+static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
 {
+	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
 	struct switchdev_fdb_dump *dump =
-		container_of(obj, struct switchdev_fdb_dump, obj);
+		container_of(fdb, struct switchdev_fdb_dump, fdb);
 	u32 portid = NETLINK_CB(dump->cb->skb).portid;
 	u32 seq = dump->cb->nlh->nlmsg_seq;
 	struct nlmsghdr *nlh;
@@ -888,13 +1042,13 @@
 	ndm->ndm_pad2    = 0;
 	ndm->ndm_flags   = NTF_SELF;
 	ndm->ndm_type    = 0;
-	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = obj->u.fdb.ndm_state;
+	ndm->ndm_ifindex = dump->dev->ifindex;
+	ndm->ndm_state   = fdb->ndm_state;
 
-	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
+	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
 		goto nla_put_failure;
 
-	if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
+	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
 		goto nla_put_failure;
 
 	nlmsg_end(dump->skb, nlh);
@@ -924,16 +1078,14 @@
 			    struct net_device *filter_dev, int idx)
 {
 	struct switchdev_fdb_dump dump = {
-		.obj = {
-			.id = SWITCHDEV_OBJ_PORT_FDB,
-			.cb = switchdev_port_fdb_dump_cb,
-		},
+		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
+		.dev = dev,
 		.skb = skb,
 		.cb = cb,
 		.idx = idx,
 	};
 
-	switchdev_port_obj_dump(dev, &dump.obj);
+	switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
 	return dump.idx;
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
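On the driver side, the callback-based dump contract introduced here means an implementation fills the caller's object once per entry and invokes cb each time. A hedged sketch, where struct drv_fdb_entry and drv_priv() are hypothetical driver internals:

/* Hypothetical driver obj_dump: walk a private FDB list and report
 * each entry through the caller-supplied callback.
 */
static int drv_port_fdb_dump(struct net_device *dev,
			     struct switchdev_obj_port_fdb *fdb,
			     switchdev_obj_dump_cb_t *cb)
{
	struct drv_fdb_entry *e;	/* hypothetical entry type */
	int err;

	list_for_each_entry(e, &drv_priv(dev)->fdb_list, list) {
		ether_addr_copy(fdb->addr, e->mac);
		fdb->vid = e->vid;
		fdb->ndm_state = NUD_REACHABLE;
		err = cb(&fdb->obj);	/* e.g. switchdev_port_fdb_dump_cb */
		if (err)
			return err;
	}
	return 0;
}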
@@ -964,12 +1116,14 @@
 static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
 {
 	struct switchdev_attr attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 	};
 	struct switchdev_attr prev_attr;
 	struct net_device *dev = NULL;
 	int nhsel;
 
+	ASSERT_RTNL();
+
 	/* For this route, all nexthop devs must be on the same switch. */
 
 	for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
@@ -1011,21 +1165,20 @@
 int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
 			   u8 tos, u8 type, u32 nlflags, u32 tb_id)
 {
-	struct switchdev_obj fib_obj = {
-		.id = SWITCHDEV_OBJ_IPV4_FIB,
-		.u.ipv4_fib = {
-			.dst = dst,
-			.dst_len = dst_len,
-			.fi = fi,
-			.tos = tos,
-			.type = type,
-			.nlflags = nlflags,
-			.tb_id = tb_id,
-		},
+	struct switchdev_obj_ipv4_fib ipv4_fib = {
+		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+		.dst = dst,
+		.dst_len = dst_len,
+		.tos = tos,
+		.type = type,
+		.nlflags = nlflags,
+		.tb_id = tb_id,
 	};
 	struct net_device *dev;
 	int err = 0;
 
+	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+
 	/* Don't offload route if using custom ip rules or if
 	 * IPv4 FIB offloading has been disabled completely.
 	 */
@@ -1042,7 +1195,7 @@
 	if (!dev)
 		return 0;
 
-	err = switchdev_port_obj_add(dev, &fib_obj);
+	err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
 	if (!err)
 		fi->fib_flags |= RTNH_F_OFFLOAD;
 
@@ -1065,21 +1218,20 @@
 int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
 			   u8 tos, u8 type, u32 tb_id)
 {
-	struct switchdev_obj fib_obj = {
-		.id = SWITCHDEV_OBJ_IPV4_FIB,
-		.u.ipv4_fib = {
-			.dst = dst,
-			.dst_len = dst_len,
-			.fi = fi,
-			.tos = tos,
-			.type = type,
-			.nlflags = 0,
-			.tb_id = tb_id,
-		},
+	struct switchdev_obj_ipv4_fib ipv4_fib = {
+		.obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
+		.dst = dst,
+		.dst_len = dst_len,
+		.tos = tos,
+		.type = type,
+		.nlflags = 0,
+		.tb_id = tb_id,
 	};
 	struct net_device *dev;
 	int err = 0;
 
+	memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
+
 	if (!(fi->fib_flags & RTNH_F_OFFLOAD))
 		return 0;
 
@@ -1087,7 +1239,7 @@
 	if (!dev)
 		return 0;
 
-	err = switchdev_port_obj_del(dev, &fib_obj);
+	err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
 	if (!err)
 		fi->fib_flags &= ~RTNH_F_OFFLOAD;
 
@@ -1119,11 +1271,11 @@
 					  struct net_device *b)
 {
 	struct switchdev_attr a_attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 	struct switchdev_attr b_attr = {
-		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
 		.flags = SWITCHDEV_F_NO_RECURSE,
 	};
 
@@ -1202,10 +1354,11 @@
 	u32 mark = dev->ifindex;
 	u32 reset_mark = 0;
 
-	if (group_dev && joining) {
-		mark = switchdev_port_fwd_mark_get(dev, group_dev);
-	} else if (group_dev && !joining) {
-		if (dev->offload_fwd_mark == mark)
+	if (group_dev) {
+		ASSERT_RTNL();
+		if (joining)
+			mark = switchdev_port_fwd_mark_get(dev, group_dev);
+		else if (dev->offload_fwd_mark == mark)
 			/* Ohoh, this port was the mark reference port,
 			 * but it's leaving the group, so reset the
 			 * mark for the remaining ports in the group.
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index e7000be..ed98c1f 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -94,10 +94,14 @@
 		goto out;
 	ret = register_pernet_subsys(&sysctl_pernet_ops);
 	if (ret)
-		goto out;
+		goto out1;
 	register_sysctl_root(&net_sysctl_root);
 out:
 	return ret;
+out1:
+	unregister_sysctl_table(net_header);
+	net_header = NULL;
+	goto out;
 }
 
 struct ctl_table_header *register_net_sysctl(struct net *net,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 41042de..9dc239d 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -35,741 +35,301 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/tipc_config.h>
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
-#include "core.h"
+#include "link.h"
+#include "node.h"
 
-#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
-#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
+#define	BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
+#define	BCLINK_WIN_MIN	        32	/* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
-			   struct tipc_node_map *nm_b,
-			   struct tipc_node_map *nm_diff);
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
-static void tipc_bclink_lock(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	spin_lock_bh(&tn->bclink->lock);
-}
-
-static void tipc_bclink_unlock(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	spin_unlock_bh(&tn->bclink->lock);
-}
-
-void tipc_bclink_input(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
-}
-
-uint  tipc_bclink_get_mtu(void)
-{
-	return MAX_PKT_DEFAULT_MCAST;
-}
-
-static u32 bcbuf_acks(struct sk_buff *buf)
-{
-	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
-}
-
-static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
-{
-	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
-}
-
-static void bcbuf_decr_acks(struct sk_buff *buf)
-{
-	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
-}
-
-void tipc_bclink_add_node(struct net *net, u32 addr)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	tipc_bclink_lock(net);
-	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
-	tipc_bclink_unlock(net);
-}
-
-void tipc_bclink_remove_node(struct net *net, u32 addr)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	tipc_bclink_lock(net);
-	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
-
-	/* Last node? => reset backlog queue */
-	if (!tn->bclink->bcast_nodes.count)
-		tipc_link_purge_backlog(&tn->bclink->link);
-
-	tipc_bclink_unlock(net);
-}
-
-static void bclink_set_last_sent(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *bcl = tn->bcl;
-
-	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
-}
-
-u32 tipc_bclink_get_last_sent(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	return tn->bcl->silent_intv_cnt;
-}
-
-static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
-{
-	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
-						seqno : node->bclink.last_sent;
-}
-
 /**
- * tipc_bclink_retransmit_to - get most recent node to request retransmission
- *
- * Called with bclink_lock locked
+ * struct tipc_bc_base - base structure for keeping broadcast send state
+ * @link: broadcast send link structure
+ * @inputq: data input queue; will only carry SOCK_WAKEUP messages
+ * @dest: array keeping number of reachable destinations per bearer
+ * @primary_bearer: a bearer having links to all broadcast destinations, if any
  */
-struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
+struct tipc_bc_base {
+	struct tipc_link *link;
+	struct sk_buff_head inputq;
+	int dests[MAX_BEARERS];
+	int primary_bearer;
+};
 
-	return tn->bclink->retransmit_to;
+static struct tipc_bc_base *tipc_bc_base(struct net *net)
+{
+	return tipc_net(net)->bcbase;
 }
 
-/**
- * bclink_retransmit_pkt - retransmit broadcast packets
- * @after: sequence number of last packet to *not* retransmit
- * @to: sequence number of last packet to retransmit
- *
- * Called with bclink_lock locked
- */
-static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
+int tipc_bcast_get_mtu(struct net *net)
 {
-	struct sk_buff *skb;
-	struct tipc_link *bcl = tn->bcl;
-
-	skb_queue_walk(&bcl->transmq, skb) {
-		if (more(buf_seqno(skb), after)) {
-			tipc_link_retransmit(bcl, skb, mod(to - after));
-			break;
-		}
-	}
+	return tipc_link_mtu(tipc_bc_sndlink(net));
 }
 
-/**
- * bclink_prepare_wakeup - prepare users for wakeup after congestion
- * @bcl: broadcast link
- * @resultq: queue for users which can be woken up
- * Move a number of waiting users, as permitted by available space in
- * the send queue, from link wait queue to specified queue for wakeup
+/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
+ *                               if any, and make it the primary bearer
  */
-static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+static void tipc_bcbase_select_primary(struct net *net)
 {
-	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
-	int imp, lim;
-	struct sk_buff *skb, *tmp;
+	struct tipc_bc_base *bb = tipc_bc_base(net);
+	int all_dests =  tipc_link_bc_peers(bb->link);
+	int i, mtu;
 
-	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
-		imp = TIPC_SKB_CB(skb)->chain_imp;
-		lim = bcl->window + bcl->backlog[imp].limit;
-		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
-		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+	bb->primary_bearer = INVALID_BEARER_ID;
+
+	if (!all_dests)
+		return;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (!bb->dests[i])
 			continue;
-		skb_unlink(skb, &bcl->wakeupq);
-		skb_queue_tail(resultq, skb);
+
+		mtu = tipc_bearer_mtu(net, i);
+		if (mtu < tipc_link_mtu(bb->link))
+			tipc_link_set_mtu(bb->link, mtu);
+
+		if (bb->dests[i] < all_dests)
+			continue;
+
+		bb->primary_bearer = i;
+
+		/* Reduce the risk that all nodes select the same primary */
+		if ((i ^ tipc_own_addr(net)) & 1)
+			break;
 	}
 }
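The parity tie-break at the end of the loop is worth a worked example: a node keeps scanning past a fully-connected bearer whenever the bearer index and its own address share the same low bit, so nodes with odd and even addresses tend to settle on different primaries. A standalone illustration in plain userspace C, with assumed node addresses:

/* Illustration of the (i ^ own_addr) & 1 tie-break: with two
 * bearers that both reach every node, odd-address nodes stop at
 * bearer 0 while even-address nodes end up on bearer 1, spreading
 * broadcast load across the bearers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int addrs[] = { 0x1001001, 0x1001002, 0x1001003, 0x1001004 };
	int n, i;

	for (n = 0; n < 4; n++) {
		int primary = -1;

		for (i = 0; i < 2; i++) {	/* both bearers qualify */
			primary = i;
			if ((i ^ addrs[n]) & 1)
				break;
		}
		printf("node 0x%x -> primary bearer %d\n", addrs[n], primary);
	}
	return 0;
}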
 
-/**
- * tipc_bclink_wakeup_users - wake up pending users
- *
- * Called with no locks taken
- */
-void tipc_bclink_wakeup_users(struct net *net)
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *bcl = tn->bcl;
-	struct sk_buff_head resultq;
+	struct tipc_bc_base *bb = tipc_bc_base(net);
 
-	skb_queue_head_init(&resultq);
-	bclink_prepare_wakeup(bcl, &resultq);
-	tipc_sk_rcv(net, &resultq);
+	tipc_bcast_lock(net);
+	bb->dests[bearer_id]++;
+	tipc_bcbase_select_primary(net);
+	tipc_bcast_unlock(net);
 }
 
-/**
- * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
- * @n_ptr: node that sent acknowledgement info
- * @acked: broadcast sequence # that has been acknowledged
- *
- * Node is locked, bclink_lock unlocked.
- */
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
 {
-	struct sk_buff *skb, *tmp;
-	unsigned int released = 0;
-	struct net *net = n_ptr->net;
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct tipc_bc_base *bb = tipc_bc_base(net);
 
-	if (unlikely(!n_ptr->bclink.recv_permitted))
+	tipc_bcast_lock(net);
+	bb->dests[bearer_id]--;
+	tipc_bcbase_select_primary(net);
+	tipc_bcast_unlock(net);
+}
+
+/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
+ *
+ * Note that the number of reachable destinations, as indicated in the
+ * dests[] array, may temporarily differ from the number of destinations
+ * indicated in each sent buffer. We can tolerate this: excess destination
+ * nodes will drop and never acknowledge the unexpected packets, and
+ * missing destinations will either require retransmission (if they are
+ * just about to be added to the bearer), or be removed from the buffer's
+ * 'ackers' counter (if they just went down).
+ */
+static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
+{
+	int bearer_id;
+	struct tipc_bc_base *bb = tipc_bc_base(net);
+	struct sk_buff *skb, *_skb;
+	struct sk_buff_head _xmitq;
+
+	if (skb_queue_empty(xmitq))
 		return;
 
-	tipc_bclink_lock(net);
-
-	/* Bail out if tx queue is empty (no clean up is required) */
-	skb = skb_peek(&tn->bcl->transmq);
-	if (!skb)
-		goto exit;
-
-	/* Determine which messages need to be acknowledged */
-	if (acked == INVALID_LINK_SEQ) {
-		/*
-		 * Contact with specified node has been lost, so need to
-		 * acknowledge sent messages only (if other nodes still exist)
-		 * or both sent and unsent messages (otherwise)
-		 */
-		if (tn->bclink->bcast_nodes.count)
-			acked = tn->bcl->silent_intv_cnt;
-		else
-			acked = tn->bcl->snd_nxt;
-	} else {
-		/*
-		 * Bail out if specified sequence number does not correspond
-		 * to a message that has been sent and not yet acknowledged
-		 */
-		if (less(acked, buf_seqno(skb)) ||
-		    less(tn->bcl->silent_intv_cnt, acked) ||
-		    less_eq(acked, n_ptr->bclink.acked))
-			goto exit;
+	/* The typical case: at least one bearer has links to all nodes */
+	bearer_id = bb->primary_bearer;
+	if (bearer_id >= 0) {
+		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
+		return;
 	}
 
-	/* Skip over packets that node has previously acknowledged */
-	skb_queue_walk(&tn->bcl->transmq, skb) {
-		if (more(buf_seqno(skb), n_ptr->bclink.acked))
-			break;
-	}
+	/* We have to transmit across all bearers */
+	skb_queue_head_init(&_xmitq);
+	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+		if (!bb->dests[bearer_id])
+			continue;
 
-	/* Update packets that node is now acknowledging */
-	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
-		if (more(buf_seqno(skb), acked))
-			break;
-		bcbuf_decr_acks(skb);
-		bclink_set_last_sent(net);
-		if (bcbuf_acks(skb) == 0) {
-			__skb_unlink(skb, &tn->bcl->transmq);
-			kfree_skb(skb);
-			released = 1;
+		skb_queue_walk(xmitq, skb) {
+			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
+			if (!_skb)
+				break;
+			__skb_queue_tail(&_xmitq, _skb);
 		}
+		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
 	}
-	n_ptr->bclink.acked = acked;
-
-	/* Try resolving broadcast link congestion, if necessary */
-	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
-		tipc_link_push_packets(tn->bcl);
-		bclink_set_last_sent(net);
-	}
-	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
-		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
-exit:
-	tipc_bclink_unlock(net);
+	__skb_queue_purge(xmitq);
+	__skb_queue_purge(&_xmitq);
 }
 
-/**
- * tipc_bclink_update_link_state - update broadcast link state
- *
- * RCU and node lock set
- */
-void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
-				   u32 last_sent)
-{
-	struct sk_buff *buf;
-	struct net *net = n_ptr->net;
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-	/* Ignore "stale" link state info */
-	if (less_eq(last_sent, n_ptr->bclink.last_in))
-		return;
-
-	/* Update link synchronization state; quit if in sync */
-	bclink_update_last_sent(n_ptr, last_sent);
-
-	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
-		return;
-
-	/* Update out-of-sync state; quit if loss is still unconfirmed */
-	if ((++n_ptr->bclink.oos_state) == 1) {
-		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
-			return;
-		n_ptr->bclink.oos_state++;
-	}
-
-	/* Don't NACK if one has been recently sent (or seen) */
-	if (n_ptr->bclink.oos_state & 0x1)
-		return;
-
-	/* Send NACK */
-	buf = tipc_buf_acquire(INT_H_SIZE);
-	if (buf) {
-		struct tipc_msg *msg = buf_msg(buf);
-		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
-		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
-
-		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
-			      INT_H_SIZE, n_ptr->addr);
-		msg_set_non_seq(msg, 1);
-		msg_set_mc_netid(msg, tn->net_id);
-		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
-		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
-		msg_set_bcgap_to(msg, to);
-
-		tipc_bclink_lock(net);
-		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
-		tn->bcl->stats.sent_nacks++;
-		tipc_bclink_unlock(net);
-		kfree_skb(buf);
-
-		n_ptr->bclink.oos_state++;
-	}
-}
-
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
-{
-	u16 last = msg_last_bcast(hdr);
-	int mtyp = msg_type(hdr);
-
-	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
-		return;
-	if (mtyp == STATE_MSG) {
-		tipc_bclink_update_link_state(n, last);
-		return;
-	}
-	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
-	 * and transfer synch info in LINK_PROTOCOL messages.
-	 */
-	if (tipc_node_is_up(n))
-		return;
-	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
-		return;
-	n->bclink.last_sent = last;
-	n->bclink.last_in = last;
-	n->bclink.oos_state = 0;
-}
-
-/**
- * bclink_peek_nack - monitor retransmission requests sent by other nodes
- *
- * Delay any upcoming NACK by this node if another node has already
- * requested the first message this node is going to ask for.
- */
-static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
-{
-	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
-
-	if (unlikely(!n_ptr))
-		return;
-
-	tipc_node_lock(n_ptr);
-	if (n_ptr->bclink.recv_permitted &&
-	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
-	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
-		n_ptr->bclink.oos_state = 2;
-	tipc_node_unlock(n_ptr);
-	tipc_node_put(n_ptr);
-}
-
-/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
+/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
  *                    and to identified node local sockets
  * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *bcl = tn->bcl;
-	struct tipc_bclink *bclink = tn->bclink;
+	struct tipc_link *l = tipc_bc_sndlink(net);
+	struct sk_buff_head xmitq, inputq, rcvq;
 	int rc = 0;
-	int bc = 0;
-	struct sk_buff *skb;
-	struct sk_buff_head arrvq;
-	struct sk_buff_head inputq;
 
-	/* Prepare clone of message for local node */
-	skb = tipc_msg_reassemble(list);
-	if (unlikely(!skb))
+	__skb_queue_head_init(&rcvq);
+	__skb_queue_head_init(&xmitq);
+	skb_queue_head_init(&inputq);
+
+	/* Prepare message clone for local node */
+	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
 		return -EHOSTUNREACH;
 
-	/* Broadcast to all nodes */
-	if (likely(bclink)) {
-		tipc_bclink_lock(net);
-		if (likely(bclink->bcast_nodes.count)) {
-			rc = __tipc_link_xmit(net, bcl, list);
-			if (likely(!rc)) {
-				u32 len = skb_queue_len(&bcl->transmq);
+	tipc_bcast_lock(net);
+	if (tipc_link_bc_peers(l))
+		rc = tipc_link_xmit(l, list, &xmitq);
+	tipc_bcast_unlock(net);
 
-				bclink_set_last_sent(net);
-				bcl->stats.queue_sz_counts++;
-				bcl->stats.accu_queue_sz += len;
-			}
-			bc = 1;
-		}
-		tipc_bclink_unlock(net);
-	}
-
-	if (unlikely(!bc))
-		__skb_queue_purge(list);
-
+	/* Don't send to local node if adding to link failed */
 	if (unlikely(rc)) {
-		kfree_skb(skb);
+		__skb_queue_purge(&rcvq);
 		return rc;
 	}
-	/* Deliver message clone */
-	__skb_queue_head_init(&arrvq);
-	skb_queue_head_init(&inputq);
-	__skb_queue_tail(&arrvq, skb);
-	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
-	return rc;
-}
 
-/**
- * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
- *
- * Called with both sending node's lock and bclink_lock taken.
- */
-static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
-{
-	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-
-	bclink_update_last_sent(node, seqno);
-	node->bclink.last_in = seqno;
-	node->bclink.oos_state = 0;
-	tn->bcl->stats.recv_info++;
-
-	/*
-	 * Unicast an ACK periodically, ensuring that
-	 * all nodes in the cluster don't ACK at the same time
-	 */
-	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-		tipc_link_proto_xmit(node_active_link(node, node->addr),
-				     STATE_MSG, 0, 0, 0, 0);
-		tn->bcl->stats.sent_acks++;
-	}
-}
-
-/**
- * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
- *
- * RCU is locked, no other locks set
- */
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *bcl = tn->bcl;
-	struct tipc_msg *msg = buf_msg(buf);
-	struct tipc_node *node;
-	u32 next_in;
-	u32 seqno;
-	int deferred = 0;
-	int pos = 0;
-	struct sk_buff *iskb;
-	struct sk_buff_head *arrvq, *inputq;
-
-	/* Screen out unwanted broadcast messages */
-	if (msg_mc_netid(msg) != tn->net_id)
-		goto exit;
-
-	node = tipc_node_find(net, msg_prevnode(msg));
-	if (unlikely(!node))
-		goto exit;
-
-	tipc_node_lock(node);
-	if (unlikely(!node->bclink.recv_permitted))
-		goto unlock;
-
-	/* Handle broadcast protocol message */
-	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
-		if (msg_type(msg) != STATE_MSG)
-			goto unlock;
-		if (msg_destnode(msg) == tn->own_addr) {
-			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
-			tipc_bclink_lock(net);
-			bcl->stats.recv_nacks++;
-			tn->bclink->retransmit_to = node;
-			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
-					      msg_bcgap_to(msg));
-			tipc_bclink_unlock(net);
-			tipc_node_unlock(node);
-		} else {
-			tipc_node_unlock(node);
-			bclink_peek_nack(net, msg);
-		}
-		tipc_node_put(node);
-		goto exit;
-	}
-
-	/* Handle in-sequence broadcast message */
-	seqno = msg_seqno(msg);
-	next_in = mod(node->bclink.last_in + 1);
-	arrvq = &tn->bclink->arrvq;
-	inputq = &tn->bclink->inputq;
-
-	if (likely(seqno == next_in)) {
-receive:
-		/* Deliver message to destination */
-		if (likely(msg_isdata(msg))) {
-			tipc_bclink_lock(net);
-			bclink_accept_pkt(node, seqno);
-			spin_lock_bh(&inputq->lock);
-			__skb_queue_tail(arrvq, buf);
-			spin_unlock_bh(&inputq->lock);
-			node->action_flags |= TIPC_BCAST_MSG_EVT;
-			tipc_bclink_unlock(net);
-			tipc_node_unlock(node);
-		} else if (msg_user(msg) == MSG_BUNDLER) {
-			tipc_bclink_lock(net);
-			bclink_accept_pkt(node, seqno);
-			bcl->stats.recv_bundles++;
-			bcl->stats.recv_bundled += msg_msgcnt(msg);
-			pos = 0;
-			while (tipc_msg_extract(buf, &iskb, &pos)) {
-				spin_lock_bh(&inputq->lock);
-				__skb_queue_tail(arrvq, iskb);
-				spin_unlock_bh(&inputq->lock);
-			}
-			node->action_flags |= TIPC_BCAST_MSG_EVT;
-			tipc_bclink_unlock(net);
-			tipc_node_unlock(node);
-		} else if (msg_user(msg) == MSG_FRAGMENTER) {
-			tipc_bclink_lock(net);
-			bclink_accept_pkt(node, seqno);
-			tipc_buf_append(&node->bclink.reasm_buf, &buf);
-			if (unlikely(!buf && !node->bclink.reasm_buf)) {
-				tipc_bclink_unlock(net);
-				goto unlock;
-			}
-			bcl->stats.recv_fragments++;
-			if (buf) {
-				bcl->stats.recv_fragmented++;
-				msg = buf_msg(buf);
-				tipc_bclink_unlock(net);
-				goto receive;
-			}
-			tipc_bclink_unlock(net);
-			tipc_node_unlock(node);
-		} else {
-			tipc_bclink_lock(net);
-			bclink_accept_pkt(node, seqno);
-			tipc_bclink_unlock(net);
-			tipc_node_unlock(node);
-			kfree_skb(buf);
-		}
-		buf = NULL;
-
-		/* Determine new synchronization state */
-		tipc_node_lock(node);
-		if (unlikely(!tipc_node_is_up(node)))
-			goto unlock;
-
-		if (node->bclink.last_in == node->bclink.last_sent)
-			goto unlock;
-
-		if (skb_queue_empty(&node->bclink.deferdq)) {
-			node->bclink.oos_state = 1;
-			goto unlock;
-		}
-
-		msg = buf_msg(skb_peek(&node->bclink.deferdq));
-		seqno = msg_seqno(msg);
-		next_in = mod(next_in + 1);
-		if (seqno != next_in)
-			goto unlock;
-
-		/* Take in-sequence message from deferred queue & deliver it */
-		buf = __skb_dequeue(&node->bclink.deferdq);
-		goto receive;
-	}
-
-	/* Handle out-of-sequence broadcast message */
-	if (less(next_in, seqno)) {
-		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
-					       buf);
-		bclink_update_last_sent(node, seqno);
-		buf = NULL;
-	}
-
-	tipc_bclink_lock(net);
-
-	if (deferred)
-		bcl->stats.deferred_recv++;
-	else
-		bcl->stats.duplicates++;
-
-	tipc_bclink_unlock(net);
-
-unlock:
-	tipc_node_unlock(node);
-	tipc_node_put(node);
-exit:
-	kfree_skb(buf);
-}
-
-u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
-{
-	return (n_ptr->bclink.recv_permitted &&
-		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
-}
-
-
-/**
- * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
- *
- * Send packet over as many bearers as necessary to reach all nodes
- * that have joined the broadcast link.
- *
- * Returns 0 (packet sent successfully) under all circumstances,
- * since the broadcast link's pseudo-bearer never blocks
- */
-static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
-			      struct tipc_bearer *unused1,
-			      struct tipc_media_addr *unused2)
-{
-	int bp_index;
-	struct tipc_msg *msg = buf_msg(buf);
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_bcbearer *bcbearer = tn->bcbearer;
-	struct tipc_bclink *bclink = tn->bclink;
-
-	/* Prepare broadcast link message for reliable transmission,
-	 * if first time trying to send it;
-	 * preparation is skipped for broadcast link protocol messages
-	 * since they are sent in an unreliable manner and don't need it
-	 */
-	if (likely(!msg_non_seq(buf_msg(buf)))) {
-		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
-		msg_set_non_seq(msg, 1);
-		msg_set_mc_netid(msg, tn->net_id);
-		tn->bcl->stats.sent_info++;
-		if (WARN_ON(!bclink->bcast_nodes.count)) {
-			dump_stack();
-			return 0;
-		}
-	}
-
-	/* Send buffer over bearers until all targets reached */
-	bcbearer->remains = bclink->bcast_nodes;
-
-	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
-		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
-		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
-		struct tipc_bearer *bp[2] = {p, s};
-		struct tipc_bearer *b = bp[msg_link_selector(msg)];
-		struct sk_buff *tbuf;
-
-		if (!p)
-			break; /* No more bearers to try */
-		if (!b)
-			b = p;
-		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
-			       &bcbearer->remains_new);
-		if (bcbearer->remains_new.count == bcbearer->remains.count)
-			continue; /* Nothing added by bearer pair */
-
-		if (bp_index == 0) {
-			/* Use original buffer for first bearer */
-			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
-		} else {
-			/* Avoid concurrent buffer access */
-			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
-			if (!tbuf)
-				break;
-			tipc_bearer_send(net, b->identity, tbuf,
-					 &b->bcast_addr);
-			kfree_skb(tbuf); /* Bearer keeps a clone */
-		}
-		if (bcbearer->remains_new.count == 0)
-			break; /* All targets reached */
-
-		bcbearer->remains = bcbearer->remains_new;
-	}
-
+	/* Broadcast to all nodes, including the local node */
+	tipc_bcbase_xmit(net, &xmitq);
+	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
+	__skb_queue_purge(list);
 	return 0;
 }
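A hedged caller sketch for the ownership rule documented above: the buffer chain is consumed on every path except -ELINKCONG, where the caller keeps it and may retry (bcast_try_send() is hypothetical):

/* Hypothetical caller: retry-aware broadcast send */
static int bcast_try_send(struct net *net, struct sk_buff_head *pkts)
{
	int rc = tipc_bcast_xmit(net, pkts);

	if (rc == -ELINKCONG)		/* chain NOT consumed; retry later */
		pr_debug("bcast congested, keeping %u pkts\n",
			 skb_queue_len(pkts));
	return rc;
}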
 
-/**
- * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+/* tipc_bcast_rcv - receive a broadcast packet and deliver it to the rcv link
+ *
+ * RCU is locked, no other locks set
  */
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
-			u32 node, bool action)
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_bcbearer *bcbearer = tn->bcbearer;
-	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
-	struct tipc_bcbearer_pair *bp_curr;
-	struct tipc_bearer *b;
-	int b_index;
-	int pri;
+	struct tipc_msg *hdr = buf_msg(skb);
+	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+	struct sk_buff_head xmitq;
+	int rc;
 
-	tipc_bclink_lock(net);
+	__skb_queue_head_init(&xmitq);
 
-	if (action)
-		tipc_nmap_add(nm_ptr, node);
+	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
+		kfree_skb(skb);
+		return 0;
+	}
+
+	tipc_bcast_lock(net);
+	if (msg_user(hdr) == BCAST_PROTOCOL)
+		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
 	else
-		tipc_nmap_remove(nm_ptr, node);
+		rc = tipc_link_rcv(l, skb, NULL);
+	tipc_bcast_unlock(net);
 
-	/* Group bearers by priority (can assume max of two per priority) */
-	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+	tipc_bcbase_xmit(net, &xmitq);
 
-	rcu_read_lock();
-	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
-		if (!b || !b->nodes.count)
-			continue;
+	/* Any socket wakeup messages? */
+	if (!skb_queue_empty(inputq))
+		tipc_sk_rcv(net, inputq);
 
-		if (!bp_temp[b->priority].primary)
-			bp_temp[b->priority].primary = b;
-		else
-			bp_temp[b->priority].secondary = b;
+	return rc;
+}
+
+/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledgement
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+{
+	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+	struct sk_buff_head xmitq;
+
+	__skb_queue_head_init(&xmitq);
+
+	tipc_bcast_lock(net);
+	tipc_link_bc_ack_rcv(l, acked, &xmitq);
+	tipc_bcast_unlock(net);
+
+	tipc_bcbase_xmit(net, &xmitq);
+
+	/* Any socket wakeup messages? */
+	if (!skb_queue_empty(inputq))
+		tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+			 struct tipc_msg *hdr)
+{
+	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+	struct sk_buff_head xmitq;
+
+	__skb_queue_head_init(&xmitq);
+
+	tipc_bcast_lock(net);
+	if (msg_type(hdr) == STATE_MSG) {
+		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
+		tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+	} else {
+		tipc_link_bc_init_rcv(l, hdr);
 	}
-	rcu_read_unlock();
+	tipc_bcast_unlock(net);
 
-	/* Create array of bearer pairs for broadcasting */
-	bp_curr = bcbearer->bpairs;
-	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+	tipc_bcbase_xmit(net, &xmitq);
 
-	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
+	/* Any socket wakeup messages? */
+	if (!skb_queue_empty(inputq))
+		tipc_sk_rcv(net, inputq);
+}
 
-		if (!bp_temp[pri].primary)
-			continue;
+/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
+			 struct sk_buff_head *xmitq)
+{
+	struct tipc_link *snd_l = tipc_bc_sndlink(net);
 
-		bp_curr->primary = bp_temp[pri].primary;
+	tipc_bcast_lock(net);
+	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
+	tipc_bcbase_select_primary(net);
+	tipc_bcast_unlock(net);
+}
 
-		if (bp_temp[pri].secondary) {
-			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
-					    &bp_temp[pri].secondary->nodes)) {
-				bp_curr->secondary = bp_temp[pri].secondary;
-			} else {
-				bp_curr++;
-				bp_curr->primary = bp_temp[pri].secondary;
-			}
-		}
+/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
+{
+	struct tipc_link *snd_l = tipc_bc_sndlink(net);
+	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+	struct sk_buff_head xmitq;
 
-		bp_curr++;
-	}
+	__skb_queue_head_init(&xmitq);
 
-	tipc_bclink_unlock(net);
+	tipc_bcast_lock(net);
+	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
+	tipc_bcbase_select_primary(net);
+	tipc_bcast_unlock(net);
+
+	tipc_bcbase_xmit(net, &xmitq);
+
+	/* Any socket wakeup messages? */
+	if (!skb_queue_empty(inputq))
+		tipc_sk_rcv(net, inputq);
 }
 
 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -835,7 +395,7 @@
 	if (!bcl)
 		return 0;
 
-	tipc_bclink_lock(net);
+	tipc_bcast_lock(net);
 
 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
@@ -870,7 +430,7 @@
 	if (err)
 		goto attr_msg_full;
 
-	tipc_bclink_unlock(net);
+	tipc_bcast_unlock(net);
 	nla_nest_end(msg->skb, attrs);
 	genlmsg_end(msg->skb, hdr);
 
@@ -881,7 +441,7 @@
 attr_msg_full:
 	nla_nest_cancel(msg->skb, attrs);
 msg_full:
-	tipc_bclink_unlock(net);
+	tipc_bcast_unlock(net);
 	genlmsg_cancel(msg->skb, hdr);
 
 	return -EMSGSIZE;
@@ -895,25 +455,25 @@
 	if (!bcl)
 		return -ENOPROTOOPT;
 
-	tipc_bclink_lock(net);
+	tipc_bcast_lock(net);
 	memset(&bcl->stats, 0, sizeof(bcl->stats));
-	tipc_bclink_unlock(net);
+	tipc_bcast_unlock(net);
 	return 0;
 }
 
-int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
+static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *bcl = tn->bcl;
+	struct tipc_link *l = tipc_bc_sndlink(net);
 
-	if (!bcl)
+	if (!l)
 		return -ENOPROTOOPT;
-	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+	if (limit < BCLINK_WIN_MIN)
+		limit = BCLINK_WIN_MIN;
+	if (limit > TIPC_MAX_LINK_WIN)
 		return -EINVAL;
-
-	tipc_bclink_lock(net);
-	tipc_link_set_queue_limits(bcl, limit);
-	tipc_bclink_unlock(net);
+	tipc_bcast_lock(net);
+	tipc_link_set_queue_limits(l, limit);
+	tipc_bcast_unlock(net);
 	return 0;
 }
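Note the asymmetric validation above: an undersized window is silently raised to the minimum, while an oversized one is rejected. Assuming this patch's BCLINK_WIN_MIN of 32 and the pre-existing TIPC_MAX_LINK_WIN, the behaviour looks like:

/* Illustrative calls (outcomes assumed from the constants above) */
tipc_bc_link_set_queue_limits(net, 20);		/* window clamped up to 32 */
tipc_bc_link_set_queue_limits(net, 1 << 20);	/* too large -> -EINVAL */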
 
@@ -935,123 +495,51 @@
 
 	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
 
-	return tipc_bclink_set_queue_limits(net, win);
+	return tipc_bc_link_set_queue_limits(net, win);
 }
 
-int tipc_bclink_init(struct net *net)
+int tipc_bcast_init(struct net *net)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_bcbearer *bcbearer;
-	struct tipc_bclink *bclink;
-	struct tipc_link *bcl;
+	struct tipc_net *tn = tipc_net(net);
+	struct tipc_bc_base *bb = NULL;
+	struct tipc_link *l = NULL;
 
-	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
-	if (!bcbearer)
-		return -ENOMEM;
+	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+	if (!bb)
+		goto enomem;
+	tn->bcbase = bb;
+	spin_lock_init(&tipc_net(net)->bclock);
 
-	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
-	if (!bclink) {
-		kfree(bcbearer);
-		return -ENOMEM;
-	}
-
-	bcl = &bclink->link;
-	bcbearer->bearer.media = &bcbearer->media;
-	bcbearer->media.send_msg = tipc_bcbearer_send;
-	sprintf(bcbearer->media.name, "tipc-broadcast");
-
-	spin_lock_init(&bclink->lock);
-	__skb_queue_head_init(&bcl->transmq);
-	__skb_queue_head_init(&bcl->backlogq);
-	__skb_queue_head_init(&bcl->deferdq);
-	skb_queue_head_init(&bcl->wakeupq);
-	bcl->snd_nxt = 1;
-	spin_lock_init(&bclink->node.lock);
-	__skb_queue_head_init(&bclink->arrvq);
-	skb_queue_head_init(&bclink->inputq);
-	bcl->owner = &bclink->node;
-	bcl->owner->net = net;
-	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
-	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-	bcl->bearer_id = MAX_BEARERS;
-	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
-	msg_set_prevnode(bcl->pmsg, tn->own_addr);
-	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-	tn->bcbearer = bcbearer;
-	tn->bclink = bclink;
-	tn->bcl = bcl;
+	if (!tipc_link_bc_create(net, 0, 0,
+				 U16_MAX,
+				 BCLINK_WIN_DEFAULT,
+				 0,
+				 &bb->inputq,
+				 NULL,
+				 NULL,
+				 &l))
+		goto enomem;
+	bb->link = l;
+	tn->bcl = l;
 	return 0;
+enomem:
+	kfree(bb);
+	kfree(l);
+	return -ENOMEM;
 }
 
-void tipc_bclink_stop(struct net *net)
+void tipc_bcast_reinit(struct net *net)
+{
+	struct tipc_bc_base *b = tipc_bc_base(net);
+
+	msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
+}
+
+void tipc_bcast_stop(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-	tipc_bclink_lock(net);
-	tipc_link_purge_queues(tn->bcl);
-	tipc_bclink_unlock(net);
-
-	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
 	synchronize_net();
-	kfree(tn->bcbearer);
-	kfree(tn->bclink);
-}
-
-/**
- * tipc_nmap_add - add a node to a node map
- */
-static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
-{
-	int n = tipc_node(node);
-	int w = n / WSIZE;
-	u32 mask = (1 << (n % WSIZE));
-
-	if ((nm_ptr->map[w] & mask) == 0) {
-		nm_ptr->count++;
-		nm_ptr->map[w] |= mask;
-	}
-}
-
-/**
- * tipc_nmap_remove - remove a node from a node map
- */
-static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
-{
-	int n = tipc_node(node);
-	int w = n / WSIZE;
-	u32 mask = (1 << (n % WSIZE));
-
-	if ((nm_ptr->map[w] & mask) != 0) {
-		nm_ptr->map[w] &= ~mask;
-		nm_ptr->count--;
-	}
-}
-
-/**
- * tipc_nmap_diff - find differences between node maps
- * @nm_a: input node map A
- * @nm_b: input node map B
- * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
- */
-static void tipc_nmap_diff(struct tipc_node_map *nm_a,
-			   struct tipc_node_map *nm_b,
-			   struct tipc_node_map *nm_diff)
-{
-	int stop = ARRAY_SIZE(nm_a->map);
-	int w;
-	int b;
-	u32 map;
-
-	memset(nm_diff, 0, sizeof(*nm_diff));
-	for (w = 0; w < stop; w++) {
-		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
-		nm_diff->map[w] = map;
-		if (map != 0) {
-			for (b = 0 ; b < WSIZE; b++) {
-				if (map & (1 << b))
-					nm_diff->count++;
-			}
-		}
-	}
+	kfree(tn->bcbase);
+	kfree(tn->bcl);
 }
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index d74c69b..2855b93 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -37,102 +37,44 @@
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
-#include <linux/tipc_config.h>
-#include "link.h"
-#include "node.h"
-
-/**
- * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
- * @primary: pointer to primary bearer
- * @secondary: pointer to secondary bearer
- *
- * Bearers must have same priority and same set of reachable destinations
- * to be paired.
- */
-
-struct tipc_bcbearer_pair {
-	struct tipc_bearer *primary;
-	struct tipc_bearer *secondary;
-};
-
-#define	BCBEARER		MAX_BEARERS
-
-/**
- * struct tipc_bcbearer - bearer used by broadcast link
- * @bearer: (non-standard) broadcast bearer structure
- * @media: (non-standard) broadcast media structure
- * @bpairs: array of bearer pairs
- * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
- * @remains: temporary node map used by tipc_bcbearer_send()
- * @remains_new: temporary node map used tipc_bcbearer_send()
- *
- * Note: The fields labelled "temporary" are incorporated into the bearer
- * to avoid consuming potentially limited stack space through the use of
- * large local variables within multicast routines.  Concurrent access is
- * prevented through use of the spinlock "bclink_lock".
- */
-struct tipc_bcbearer {
-	struct tipc_bearer bearer;
-	struct tipc_media media;
-	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
-	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
-	struct tipc_node_map remains;
-	struct tipc_node_map remains_new;
-};
-
-/**
- * struct tipc_bclink - link used for broadcast messages
- * @lock: spinlock governing access to structure
- * @link: (non-standard) broadcast link structure
- * @node: (non-standard) node structure representing b'cast link's peer node
- * @bcast_nodes: map of broadcast-capable nodes
- * @retransmit_to: node that most recently requested a retransmit
- *
- * Handles sequence numbering, fragmentation, bundling, etc.
- */
-struct tipc_bclink {
-	spinlock_t lock;
-	struct tipc_link link;
-	struct tipc_node node;
-	struct sk_buff_head arrvq;
-	struct sk_buff_head inputq;
-	struct tipc_node_map bcast_nodes;
-	struct tipc_node *retransmit_to;
-};
+#include "core.h"
 
 struct tipc_node;
-extern const char tipc_bclink_name[];
+struct tipc_msg;
+struct tipc_nl_msg;
+struct tipc_node_map;
 
-/**
- * tipc_nmap_equal - test for equality of node maps
- */
-static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
-				  struct tipc_node_map *nm_b)
-{
-	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
-}
-
-int tipc_bclink_init(struct net *net);
-void tipc_bclink_stop(struct net *net);
-void tipc_bclink_add_node(struct net *net, u32 addr);
-void tipc_bclink_remove_node(struct net *net, u32 addr);
-struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
-void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
-u32  tipc_bclink_get_last_sent(struct net *net);
-u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct tipc_node *node,
-				   u32 last_sent);
-int  tipc_bclink_reset_stats(struct net *net);
-int  tipc_bclink_set_queue_limits(struct net *net, u32 limit);
-void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
-			u32 node, bool action);
-uint  tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
-void tipc_bclink_wakeup_users(struct net *net);
+int tipc_bcast_init(struct net *net);
+void tipc_bcast_reinit(struct net *net);
+void tipc_bcast_stop(struct net *net);
+void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
+			 struct sk_buff_head *xmitq);
+void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
+void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
+void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
+int  tipc_bcast_get_mtu(struct net *net);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+			 struct tipc_msg *hdr);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
-void tipc_bclink_input(struct net *net);
-void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
+int tipc_bclink_reset_stats(struct net *net);
+
+static inline void tipc_bcast_lock(struct net *net)
+{
+	spin_lock_bh(&tipc_net(net)->bclock);
+}
+
+static inline void tipc_bcast_unlock(struct net *net)
+{
+	spin_unlock_bh(&tipc_net(net)->bclock);
+}
+
+static inline struct tipc_link *tipc_bc_sndlink(struct net *net)
+{
+	return tipc_net(net)->bcl;
+}
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index ce9f7bf..648f2a6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -193,10 +193,8 @@
 
 	rcu_read_lock();
 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (b_ptr) {
-		tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
+	if (b_ptr)
 		tipc_disc_add_dest(b_ptr->link_req);
-	}
 	rcu_read_unlock();
 }
 
@@ -207,10 +205,8 @@
 
 	rcu_read_lock();
 	b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (b_ptr) {
-		tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
+	if (b_ptr)
 		tipc_disc_remove_dest(b_ptr->link_req);
-	}
 	rcu_read_unlock();
 }
 
@@ -362,6 +358,7 @@
 	b_ptr->media->disable_media(b_ptr);
 
 	tipc_node_delete_links(net, b_ptr->identity);
+	RCU_INIT_POINTER(b_ptr->media_ptr, NULL);
 	if (b_ptr->link_req)
 		tipc_disc_delete(b_ptr->link_req);
 
@@ -399,16 +396,13 @@
 
 /* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
  *
- * Mark L2 bearer as inactive so that incoming buffers are thrown away,
- * then get worker thread to complete bearer cleanup.  (Can't do cleanup
- * here because cleanup code needs to sleep and caller holds spinlocks.)
+ * Mark L2 bearer as inactive so that incoming buffers are thrown away
  */
 void tipc_disable_l2_media(struct tipc_bearer *b)
 {
 	struct net_device *dev;
 
 	dev = (struct net_device *)rtnl_dereference(b->media_ptr);
-	RCU_INIT_POINTER(b->media_ptr, NULL);
 	RCU_INIT_POINTER(dev->tipc_ptr, NULL);
 	synchronize_net();
 	dev_put(dev);
@@ -420,10 +414,9 @@
  * @b_ptr: the bearer through which the packet is to be sent
  * @dest: peer destination address
  */
-int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
+int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
 		     struct tipc_bearer *b, struct tipc_media_addr *dest)
 {
-	struct sk_buff *clone;
 	struct net_device *dev;
 	int delta;
 
@@ -431,42 +424,48 @@
 	if (!dev)
 		return 0;
 
-	clone = skb_clone(buf, GFP_ATOMIC);
-	if (!clone)
-		return 0;
-
-	delta = dev->hard_header_len - skb_headroom(buf);
+	delta = dev->hard_header_len - skb_headroom(skb);
 	if ((delta > 0) &&
-	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
-		kfree_skb(clone);
+	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+		kfree_skb(skb);
 		return 0;
 	}
 
-	skb_reset_network_header(clone);
-	clone->dev = dev;
-	clone->protocol = htons(ETH_P_TIPC);
-	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
-			dev->dev_addr, clone->len);
-	dev_queue_xmit(clone);
+	skb_reset_network_header(skb);
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_TIPC);
+	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
+			dev->dev_addr, skb->len);
+	dev_queue_xmit(skb);
 	return 0;
 }
 
-/* tipc_bearer_send- sends buffer to destination over bearer
- *
- * IMPORTANT:
- * The media send routine must not alter the buffer being passed in
- * as it may be needed for later retransmission!
- */
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
-		      struct tipc_media_addr *dest)
+int tipc_bearer_mtu(struct net *net, u32 bearer_id)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_bearer *b_ptr;
+	int mtu = 0;
+	struct tipc_bearer *b;
 
 	rcu_read_lock();
-	b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
-	if (likely(b_ptr))
-		b_ptr->media->send_msg(net, buf, b_ptr, dest);
+	b = rcu_dereference_rtnl(tipc_net(net)->bearer_list[bearer_id]);
+	if (b)
+		mtu = b->mtu;
+	rcu_read_unlock();
+	return mtu;
+}
+
+/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
+ */
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+			  struct sk_buff *skb,
+			  struct tipc_media_addr *dest)
+{
+	struct tipc_net *tn = tipc_net(net);
+	struct tipc_bearer *b;
+
+	rcu_read_lock();
+	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+	if (likely(b))
+		b->media->send_msg(net, skb, b, dest);
 	rcu_read_unlock();
 }
 
@@ -489,8 +488,31 @@
 		skb_queue_walk_safe(xmitq, skb, tmp) {
 			__skb_dequeue(xmitq);
 			b->media->send_msg(net, skb, b, dst);
-			/* Until we remove cloning in tipc_l2_send_msg(): */
-			kfree_skb(skb);
+		}
+	}
+	rcu_read_unlock();
+}
+
+/* tipc_bearer_bc_xmit() - broadcast buffers to all destinations
+ */
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+			 struct sk_buff_head *xmitq)
+{
+	struct tipc_net *tn = tipc_net(net);
+	int net_id = tn->net_id;
+	struct tipc_bearer *b;
+	struct sk_buff *skb, *tmp;
+	struct tipc_msg *hdr;
+
+	rcu_read_lock();
+	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+	if (likely(b)) {
+		skb_queue_walk_safe(xmitq, skb, tmp) {
+			hdr = buf_msg(skb);
+			msg_set_non_seq(hdr, 1);
+			msg_set_mc_netid(hdr, net_id);
+			__skb_dequeue(xmitq);
+			b->media->send_msg(net, skb, b, &b->bcast_addr);
 		}
 	}
 	rcu_read_unlock();
@@ -554,7 +576,7 @@
 	case NETDEV_CHANGE:
 		if (netif_carrier_ok(dev))
 			break;
-	case NETDEV_DOWN:
+	case NETDEV_GOING_DOWN:
 	case NETDEV_CHANGEMTU:
 		tipc_reset_bearer(net, b_ptr);
 		break;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 6426f24..552185b 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -163,6 +163,7 @@
 	u32 identity;
 	struct tipc_link_req *link_req;
 	char net_plane;
+	int node_cnt;
 	struct tipc_node_map nodes;
 };
 
@@ -215,10 +216,14 @@
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
-void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
-		      struct tipc_media_addr *dest);
+int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
+			  struct sk_buff *skb,
+			  struct tipc_media_addr *dest);
 void tipc_bearer_xmit(struct net *net, u32 bearer_id,
 		      struct sk_buff_head *xmitq,
 		      struct tipc_media_addr *dst);
+void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
+			 struct sk_buff_head *xmitq);
 
 #endif	/* _TIPC_BEARER_H */
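
Illustration (not part of the patch): tipc_bearer_bc_xmit() above stamps every packet it drains from the queue as non-sequenced and tags it with the cluster netid before handing it to the media layer on the broadcast address. A toy userspace model of that drain-stamp-send loop, all names invented:

#include <stdio.h>

struct hdr { int non_seq; int netid; unsigned short seqno; };

static void bc_xmit(struct hdr *q, int n, int net_id)
{
	for (int i = 0; i < n; i++) {
		q[i].non_seq = 1;        /* msg_set_non_seq(hdr, 1) */
		q[i].netid = net_id;     /* msg_set_mc_netid(hdr, net_id) */
		printf("xmit seqno %u (netid %d)\n", q[i].seqno, q[i].netid);
	}
}

int main(void)
{
	struct hdr q[] = { { 0, 0, 1 }, { 0, 0, 2 } };

	bc_xmit(q, 2, 4711);
	return 0;
}
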
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 005ba5e..03a8428 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -42,6 +42,7 @@
 #include "bearer.h"
 #include "net.h"
 #include "socket.h"
+#include "bcast.h"
 
 #include <linux/module.h>
 
@@ -71,8 +72,15 @@
 	err = tipc_topsrv_start(net);
 	if (err)
 		goto out_subscr;
+
+	err = tipc_bcast_init(net);
+	if (err)
+		goto out_bclink;
+
 	return 0;
 
+out_bclink:
+	tipc_bcast_stop(net);
 out_subscr:
 	tipc_nametbl_stop(net);
 out_nametbl:
@@ -85,6 +93,7 @@
 {
 	tipc_topsrv_stop(net);
 	tipc_net_stop(net);
+	tipc_bcast_stop(net);
 	tipc_nametbl_stop(net);
 	tipc_sk_rht_destroy(net);
 }
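
Illustration (not part of the patch): tipc_net_init() above follows the kernel's goto-unwind convention, where each failed stage jumps to a label that tears down what was set up before it. Note that the added out_bclink label calls tipc_bcast_stop() for the stage that failed, which therefore has to cope with a partially initialized state; the classic shape, sketched below in plain C with invented subsystem names, unwinds only the stages that completed:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }   /* pretend this stage fails */
static void stop_a(void) { puts("stop a"); }
static void stop_b(void) { puts("stop b"); }

static int subsystem_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto out_a;
	err = init_c();
	if (err)
		goto out_b;
	return 0;

out_b:
	stop_b();   /* unwind in reverse order of init */
out_a:
	stop_a();
out:
	return err;
}

int main(void)
{
	printf("init: %d\n", subsystem_init());
	return 0;
}
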
diff --git a/net/tipc/core.h b/net/tipc/core.h
index b96b41e..18e95a8 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -62,8 +62,7 @@
 
 struct tipc_node;
 struct tipc_bearer;
-struct tipc_bcbearer;
-struct tipc_bclink;
+struct tipc_bc_base;
 struct tipc_link;
 struct tipc_name_table;
 struct tipc_server;
@@ -93,8 +92,8 @@
 	struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
 
 	/* Broadcast link */
-	struct tipc_bcbearer *bcbearer;
-	struct tipc_bclink *bclink;
+	spinlock_t bclock;
+	struct tipc_bc_base *bcbase;
 	struct tipc_link *bcl;
 
 	/* Socket hash table */
@@ -114,6 +113,11 @@
 	return net_generic(net, tipc_net_id);
 }
 
+static inline int tipc_netid(struct net *net)
+{
+	return tipc_net(net)->net_id;
+}
+
 static inline u16 mod(u16 x)
 {
 	return x & 0xffffu;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index d14e0a4..afe8c47 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -89,7 +89,7 @@
 		      MAX_H_SIZE, dest_domain);
 	msg_set_non_seq(msg, 1);
 	msg_set_node_sig(msg, tn->random);
-	msg_set_node_capabilities(msg, 0);
+	msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
 	msg_set_dest_domain(msg, dest_domain);
 	msg_set_bc_netid(msg, tn->net_id);
 	b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
@@ -167,11 +167,10 @@
 	/* Send response, if necessary */
 	if (respond && (mtyp == DSC_REQ_MSG)) {
 		rskb = tipc_buf_acquire(MAX_H_SIZE);
-		if (rskb) {
-			tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
-			tipc_bearer_send(net, bearer->identity, rskb, &maddr);
-			kfree_skb(rskb);
-		}
+		if (!rskb)
+			return;
+		tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+		tipc_bearer_xmit_skb(net, bearer->identity, rskb, &maddr);
 	}
 }
 
@@ -225,6 +224,7 @@
 static void disc_timeout(unsigned long data)
 {
 	struct tipc_link_req *req = (struct tipc_link_req *)data;
+	struct sk_buff *skb;
 	int max_delay;
 
 	spin_lock_bh(&req->lock);
@@ -242,9 +242,9 @@
 	 * hold at fast polling rate if don't have any associated nodes,
 	 * otherwise hold at slow polling rate
 	 */
-	tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
-
-
+	skb = skb_clone(req->buf, GFP_ATOMIC);
+	if (skb)
+		tipc_bearer_xmit_skb(req->net, req->bearer_id, skb, &req->dest);
 	req->timer_intv *= 2;
 	if (req->num_nodes)
 		max_delay = TIPC_LINK_REQ_SLOW;
@@ -271,6 +271,7 @@
 		     struct tipc_media_addr *dest)
 {
 	struct tipc_link_req *req;
+	struct sk_buff *skb;
 
 	req = kmalloc(sizeof(*req), GFP_ATOMIC);
 	if (!req)
@@ -292,7 +293,9 @@
 	setup_timer(&req->timer, disc_timeout, (unsigned long)req);
 	mod_timer(&req->timer, jiffies + req->timer_intv);
 	b_ptr->link_req = req;
-	tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+	skb = skb_clone(req->buf, GFP_ATOMIC);
+	if (skb)
+		tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
 	return 0;
 }
 
@@ -316,6 +319,7 @@
 void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
 {
 	struct tipc_link_req *req = b_ptr->link_req;
+	struct sk_buff *skb;
 
 	spin_lock_bh(&req->lock);
 	tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
@@ -325,6 +329,8 @@
 	req->num_nodes = 0;
 	req->timer_intv = TIPC_LINK_REQ_INIT;
 	mod_timer(&req->timer, jiffies + req->timer_intv);
-	tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
+	skb = skb_clone(req->buf, GFP_ATOMIC);
+	if (skb)
+		tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
 	spin_unlock_bh(&req->lock);
 }
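
Illustration (not part of the patch): tipc_bearer_xmit_skb() consumes the buffer it is given, now that tipc_l2_send_msg() transmits directly instead of cloning. That is why the discovery code above clones its long-lived req->buf template before every transmission. A userspace sketch of that ownership rule, with invented helpers:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf { size_t len; unsigned char *data; };

/* Consuming send: like tipc_bearer_xmit_skb(), ownership of 'b'
 * passes to the callee, which releases it after transmission. */
static void xmit_consume(struct buf *b)
{
	printf("sent %zu bytes\n", b->len);
	free(b->data);
	free(b);
}

static struct buf *buf_clone(const struct buf *b)
{
	struct buf *c = malloc(sizeof(*c));

	if (!c)
		return NULL;
	c->len = b->len;
	c->data = malloc(b->len);
	if (!c->data) {
		free(c);
		return NULL;
	}
	memcpy(c->data, b->data, b->len);
	return c;
}

int main(void)
{
	/* A long-lived template, like req->buf in the discovery timer */
	struct buf tmpl = { 5, (unsigned char *)strdup("hello") };

	for (int i = 0; i < 3; i++) {
		struct buf *skb = buf_clone(&tmpl); /* clone, keep template */
		if (skb)
			xmit_consume(skb);          /* callee frees the clone */
	}
	free(tmpl.data);
	return 0;
}
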
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 75db07c..9efbdbd 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -50,6 +50,7 @@
  */
 static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
+static const char tipc_bclink_name[] = "broadcast-link";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
@@ -75,6 +76,14 @@
 	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
 };
 
+/* Send states for broadcast NACKs
+ */
+enum {
+	BC_NACK_SND_CONDITIONAL,
+	BC_NACK_SND_UNCONDITIONAL,
+	BC_NACK_SND_SUPPRESS,
+};
+
 /*
  * Interval between NACKs when packets arrive out of order
  */
@@ -110,7 +119,11 @@
 				      struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static void tipc_link_build_nack_msg(struct tipc_link *l,
+				     struct sk_buff_head *xmitq);
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+					struct sk_buff_head *xmitq);
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
 
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
@@ -120,11 +133,21 @@
 	return link_is_up(l);
 }
 
+bool tipc_link_peer_is_down(struct tipc_link *l)
+{
+	return l->state == LINK_PEER_RESET;
+}
+
 bool tipc_link_is_reset(struct tipc_link *l)
 {
 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
+bool tipc_link_is_establishing(struct tipc_link *l)
+{
+	return l->state == LINK_ESTABLISHING;
+}
+
 bool tipc_link_is_synching(struct tipc_link *l)
 {
 	return l->state == LINK_SYNCHING;
@@ -140,11 +163,66 @@
 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 }
 
+static bool link_is_bc_sndlink(struct tipc_link *l)
+{
+	return !l->bc_sndlink;
+}
+
+static bool link_is_bc_rcvlink(struct tipc_link *l)
+{
+	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
+}
+
 int tipc_link_is_active(struct tipc_link *l)
 {
-	struct tipc_node *n = l->owner;
+	return l->active;
+}
 
-	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
+void tipc_link_set_active(struct tipc_link *l, bool active)
+{
+	l->active = active;
+}
+
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+			   struct tipc_link *uc_l,
+			   struct sk_buff_head *xmitq)
+{
+	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
+
+	snd_l->ackers++;
+	rcv_l->acked = snd_l->snd_nxt - 1;
+	tipc_link_build_bc_init_msg(uc_l, xmitq);
+}
+
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+			      struct tipc_link *rcv_l,
+			      struct sk_buff_head *xmitq)
+{
+	u16 ack = snd_l->snd_nxt - 1;
+
+	snd_l->ackers--;
+	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
+	tipc_link_reset(rcv_l);
+	rcv_l->state = LINK_RESET;
+	if (!snd_l->ackers) {
+		tipc_link_reset(snd_l);
+		__skb_queue_purge(xmitq);
+	}
+}
+
+int tipc_link_bc_peers(struct tipc_link *l)
+{
+	return l->ackers;
+}
+
+void tipc_link_set_mtu(struct tipc_link *l, int mtu)
+{
+	l->mtu = mtu;
+}
+
+int tipc_link_mtu(struct tipc_link *l)
+{
+	return l->mtu;
 }
 
 static u32 link_own_addr(struct tipc_link *l)
@@ -155,57 +233,72 @@
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
- * @b: pointer to associated bearer
+ * @if_name: associated interface name
+ * @bearer_id: id (index) of associated bearer
+ * @tolerance: link tolerance to be used by link
+ * @net_plane: network plane (A, B, C, ...) this link belongs to
+ * @mtu: mtu to be advertised by link
+ * @priority: priority to be used by link
+ * @window: send window to be used by link
+ * @session: session to be used by link
  * @ownnode: identity of own node
- * @peer: identity of peer node
- * @maddr: media address to be used
+ * @peer: node id of peer node
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @bc_rcvlink: the peer specific link used for broadcast reception
  * @inputq: queue to put messages ready for delivery
  * @namedq: queue to put binding table update messages ready for delivery
  * @link: return value, pointer to put the created link
  *
  * Returns true if link was created, otherwise false
  */
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
-		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
-		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+		      int tolerance, char net_plane, u32 mtu, int priority,
+		      int window, u32 session, u32 ownnode, u32 peer,
+		      u16 peer_caps,
+		      struct tipc_link *bc_sndlink,
+		      struct tipc_link *bc_rcvlink,
+		      struct sk_buff_head *inputq,
+		      struct sk_buff_head *namedq,
 		      struct tipc_link **link)
 {
 	struct tipc_link *l;
 	struct tipc_msg *hdr;
-	char *if_name;
 
 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
 	if (!l)
 		return false;
 	*link = l;
-
-	/* Note: peer i/f name is completed by reset/activate message */
-	if_name = strchr(b->name, ':') + 1;
-	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
-		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-
-	l->addr = peer;
-	l->media_addr = maddr;
-	l->owner = n;
-	l->peer_session = WILDCARD_SESSION;
-	l->bearer_id = b->identity;
-	l->tolerance = b->tolerance;
-	l->net_plane = b->net_plane;
-	l->advertised_mtu = b->mtu;
-	l->mtu = b->mtu;
-	l->priority = b->priority;
-	tipc_link_set_queue_limits(l, b->window);
-	l->inputq = inputq;
-	l->namedq = namedq;
-	l->state = LINK_RESETTING;
 	l->pmsg = (struct tipc_msg *)&l->proto_msg;
 	hdr = l->pmsg;
 	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
 	msg_set_size(hdr, sizeof(l->proto_msg));
 	msg_set_session(hdr, session);
 	msg_set_bearer_id(hdr, l->bearer_id);
+
+	/* Note: peer i/f name is completed by reset/activate message */
+	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 	strcpy((char *)msg_data(hdr), if_name);
+
+	l->addr = peer;
+	l->peer_caps = peer_caps;
+	l->net = net;
+	l->peer_session = WILDCARD_SESSION;
+	l->bearer_id = bearer_id;
+	l->tolerance = tolerance;
+	l->net_plane = net_plane;
+	l->advertised_mtu = mtu;
+	l->mtu = mtu;
+	l->priority = priority;
+	tipc_link_set_queue_limits(l, window);
+	l->ackers = 1;
+	l->bc_sndlink = bc_sndlink;
+	l->bc_rcvlink = bc_rcvlink;
+	l->inputq = inputq;
+	l->namedq = namedq;
+	l->state = LINK_RESETTING;
 	__skb_queue_head_init(&l->transmq);
 	__skb_queue_head_init(&l->backlogq);
 	__skb_queue_head_init(&l->deferdq);
@@ -214,27 +307,43 @@
 	return true;
 }
 
-/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+/**
+ * tipc_link_bc_create - create new link to be used for broadcast
+ * @n: pointer to associated node
+ * @mtu: mtu to be used
+ * @window: send window to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
  *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
+ * Returns true if link was created, otherwise false
  */
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
-				    struct sk_buff_head *xmitq)
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+			 int mtu, int window, u16 peer_caps,
+			 struct sk_buff_head *inputq,
+			 struct sk_buff_head *namedq,
+			 struct tipc_link *bc_sndlink,
+			 struct tipc_link **link)
 {
-	struct sk_buff *skb;
-	struct sk_buff_head list;
-	u16 last_sent;
+	struct tipc_link *l;
 
-	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
-			      0, l->addr, link_own_addr(l), 0, 0, 0);
-	if (!skb)
-		return;
-	last_sent = tipc_bclink_get_last_sent(l->owner->net);
-	msg_set_last_bcast(buf_msg(skb), last_sent);
-	__skb_queue_head_init(&list);
-	__skb_queue_tail(&list, skb);
-	tipc_link_xmit(l, &list, xmitq);
+	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
+			      0, ownnode, peer, peer_caps, bc_sndlink,
+			      NULL, inputq, namedq, link))
+		return false;
+
+	l = *link;
+	strcpy(l->name, tipc_bclink_name);
+	tipc_link_reset(l);
+	l->state = LINK_RESET;
+	l->ackers = 0;
+	l->bc_rcvlink = l;
+
+	/* Broadcast send link is always up */
+	if (link_is_bc_sndlink(l))
+		l->state = LINK_ESTABLISHED;
+
+	return true;
 }
 
 /**
@@ -321,14 +430,15 @@
 		switch (evt) {
 		case LINK_ESTABLISH_EVT:
 			l->state = LINK_ESTABLISHED;
-			rc |= TIPC_LINK_UP_EVT;
 			break;
 		case LINK_FAILOVER_BEGIN_EVT:
 			l->state = LINK_FAILINGOVER;
 			break;
-		case LINK_PEER_RESET_EVT:
 		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
 		case LINK_FAILURE_EVT:
+		case LINK_PEER_RESET_EVT:
 		case LINK_SYNCH_BEGIN_EVT:
 		case LINK_FAILOVER_END_EVT:
 			break;
@@ -440,12 +550,17 @@
 
 /* tipc_link_timeout - perform periodic task as instructed from node timeout
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
 	int rc = 0;
 	int mtyp = STATE_MSG;
 	bool xmit = false;
 	bool prb = false;
+	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
+	u16 bc_acked = l->bc_rcvlink->acked;
+	bool bc_up = link_is_up(l->bc_rcvlink);
 
 	link_profile_stats(l);
 
@@ -453,7 +568,7 @@
 	case LINK_ESTABLISHED:
 	case LINK_SYNCHING:
 		if (!l->silent_intv_cnt) {
-			if (tipc_bclink_acks_missing(l->owner))
+			if (bc_up && (bc_acked != bc_snt))
 				xmit = true;
 		} else if (l->silent_intv_cnt <= l->abort_limit) {
 			xmit = true;
@@ -544,42 +659,8 @@
 	}
 }
 
-/**
- * tipc_link_reset_fragments - purge link's inbound message fragments queue
- * @l_ptr: pointer to link
- */
-void tipc_link_reset_fragments(struct tipc_link *l_ptr)
-{
-	kfree_skb(l_ptr->reasm_buf);
-	l_ptr->reasm_buf = NULL;
-}
-
-void tipc_link_purge_backlog(struct tipc_link *l)
-{
-	__skb_queue_purge(&l->backlogq);
-	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
-}
-
-/**
- * tipc_link_purge_queues - purge all pkt queues associated with link
- * @l_ptr: pointer to link
- */
-void tipc_link_purge_queues(struct tipc_link *l_ptr)
-{
-	__skb_queue_purge(&l_ptr->deferdq);
-	__skb_queue_purge(&l_ptr->transmq);
-	tipc_link_purge_backlog(l_ptr);
-	tipc_link_reset_fragments(l_ptr);
-}
-
 void tipc_link_reset(struct tipc_link *l)
 {
-	tipc_link_fsm_evt(l, LINK_RESET_EVT);
-
 	/* Link is down, accept any session */
 	l->peer_session = WILDCARD_SESSION;
 
@@ -589,12 +670,16 @@
 	/* Prepare for renewed mtu size negotiation */
 	l->mtu = l->advertised_mtu;
 
-	/* Clean up all queues: */
+	/* Clean up all queues and counters: */
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
 	skb_queue_splice_init(&l->wakeupq, l->inputq);
-
-	tipc_link_purge_backlog(l);
+	__skb_queue_purge(&l->backlogq);
+	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
 	kfree_skb(l->reasm_buf);
 	kfree_skb(l->failover_reasm_skb);
 	l->reasm_buf = NULL;
@@ -602,81 +687,15 @@
 	l->rcv_unacked = 0;
 	l->snd_nxt = 1;
 	l->rcv_nxt = 1;
+	l->acked = 0;
 	l->silent_intv_cnt = 0;
 	l->stats.recv_info = 0;
 	l->stale_count = 0;
+	l->bc_peer_is_up = false;
 	link_reset_statistics(l);
 }
 
 /**
- * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
- * @link: link to use
- * @list: chain of buffers containing message
- *
- * Consumes the buffer chain, except when returning an error code,
- * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
- * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
- */
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
-		     struct sk_buff_head *list)
-{
-	struct tipc_msg *msg = buf_msg(skb_peek(list));
-	unsigned int maxwin = link->window;
-	unsigned int i, imp = msg_importance(msg);
-	uint mtu = link->mtu;
-	u16 ack = mod(link->rcv_nxt - 1);
-	u16 seqno = link->snd_nxt;
-	u16 bc_last_in = link->owner->bclink.last_in;
-	struct tipc_media_addr *addr = link->media_addr;
-	struct sk_buff_head *transmq = &link->transmq;
-	struct sk_buff_head *backlogq = &link->backlogq;
-	struct sk_buff *skb, *bskb;
-
-	/* Match msg importance against this and all higher backlog limits: */
-	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
-		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
-			return link_schedule_user(link, list);
-	}
-	if (unlikely(msg_size(msg) > mtu))
-		return -EMSGSIZE;
-
-	/* Prepare each packet for sending, and add to relevant queue: */
-	while (skb_queue_len(list)) {
-		skb = skb_peek(list);
-		msg = buf_msg(skb);
-		msg_set_seqno(msg, seqno);
-		msg_set_ack(msg, ack);
-		msg_set_bcast_ack(msg, bc_last_in);
-
-		if (likely(skb_queue_len(transmq) < maxwin)) {
-			__skb_dequeue(list);
-			__skb_queue_tail(transmq, skb);
-			tipc_bearer_send(net, link->bearer_id, skb, addr);
-			link->rcv_unacked = 0;
-			seqno++;
-			continue;
-		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
-			kfree_skb(__skb_dequeue(list));
-			link->stats.sent_bundled++;
-			continue;
-		}
-		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
-			kfree_skb(__skb_dequeue(list));
-			__skb_queue_tail(backlogq, bskb);
-			link->backlog[msg_importance(buf_msg(bskb))].len++;
-			link->stats.sent_bundled++;
-			link->stats.sent_bundles++;
-			continue;
-		}
-		link->backlog[imp].len += skb_queue_len(list);
-		skb_queue_splice_tail_init(list, backlogq);
-	}
-	link->snd_nxt = seqno;
-	return 0;
-}
-
-/**
  * tipc_link_xmit(): enqueue buffer list according to queue situation
  * @link: link to use
  * @list: chain of buffers containing message
@@ -696,7 +715,7 @@
 	unsigned int mtu = l->mtu;
 	u16 ack = l->rcv_nxt - 1;
 	u16 seqno = l->snd_nxt;
-	u16 bc_last_in = l->owner->bclink.last_in;
+	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 	struct sk_buff_head *transmq = &l->transmq;
 	struct sk_buff_head *backlogq = &l->backlogq;
 	struct sk_buff *skb, *_skb, *bskb;
@@ -715,7 +734,7 @@
 		hdr = buf_msg(skb);
 		msg_set_seqno(hdr, seqno);
 		msg_set_ack(hdr, ack);
-		msg_set_bcast_ack(hdr, bc_last_in);
+		msg_set_bcast_ack(hdr, bc_ack);
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
 			_skb = skb_clone(skb, GFP_ATOMIC);
@@ -724,6 +743,7 @@
 			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			__skb_queue_tail(xmitq, _skb);
+			TIPC_SKB_CB(skb)->ackers = l->ackers;
 			l->rcv_unacked = 0;
 			seqno++;
 			continue;
@@ -748,62 +768,13 @@
 	return 0;
 }
 
-/*
- * tipc_link_sync_rcv - synchronize broadcast link endpoints.
- * Receive the sequence number where we should start receiving and
- * acking broadcast packets from a newly added peer node, and open
- * up for reception of such packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
-{
-	struct tipc_msg *msg = buf_msg(buf);
-
-	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
-	n->bclink.recv_permitted = true;
-	kfree_skb(buf);
-}
-
-/*
- * tipc_link_push_packets - push unsent packets to bearer
- *
- * Push out the unsent messages of a link where congestion
- * has abated. Node is locked.
- *
- * Called with node locked
- */
-void tipc_link_push_packets(struct tipc_link *link)
-{
-	struct sk_buff *skb;
-	struct tipc_msg *msg;
-	u16 seqno = link->snd_nxt;
-	u16 ack = mod(link->rcv_nxt - 1);
-
-	while (skb_queue_len(&link->transmq) < link->window) {
-		skb = __skb_dequeue(&link->backlogq);
-		if (!skb)
-			break;
-		msg = buf_msg(skb);
-		link->backlog[msg_importance(msg)].len--;
-		msg_set_ack(msg, ack);
-		msg_set_seqno(msg, seqno);
-		seqno = mod(seqno + 1);
-		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-		link->rcv_unacked = 0;
-		__skb_queue_tail(&link->transmq, skb);
-		tipc_bearer_send(link->owner->net, link->bearer_id,
-				 skb, link->media_addr);
-	}
-	link->snd_nxt = seqno;
-}
-
 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
 	struct sk_buff *skb, *_skb;
 	struct tipc_msg *hdr;
 	u16 seqno = l->snd_nxt;
 	u16 ack = l->rcv_nxt - 1;
+	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 
 	while (skb_queue_len(&l->transmq) < l->window) {
 		skb = skb_peek(&l->backlogq);
@@ -817,96 +788,35 @@
 		l->backlog[msg_importance(hdr)].len--;
 		__skb_queue_tail(&l->transmq, skb);
 		__skb_queue_tail(xmitq, _skb);
-		msg_set_ack(hdr, ack);
+		TIPC_SKB_CB(skb)->ackers = l->ackers;
 		msg_set_seqno(hdr, seqno);
-		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		msg_set_ack(hdr, ack);
+		msg_set_bcast_ack(hdr, bc_ack);
 		l->rcv_unacked = 0;
 		seqno++;
 	}
 	l->snd_nxt = seqno;
 }
 
-static void link_retransmit_failure(struct tipc_link *l_ptr,
-				    struct sk_buff *buf)
+static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
 {
-	struct tipc_msg *msg = buf_msg(buf);
-	struct net *net = l_ptr->owner->net;
+	struct tipc_msg *hdr = buf_msg(skb);
 
-	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
-
-	if (l_ptr->addr) {
-		/* Handle failure on standard link */
-		link_print(l_ptr, "Resetting link ");
-		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
-			msg_user(msg), msg_type(msg), msg_size(msg),
-			msg_errcode(msg));
-		pr_info("sqno %u, prev: %x, src: %x\n",
-			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
-	} else {
-		/* Handle failure on broadcast link */
-		struct tipc_node *n_ptr;
-		char addr_string[16];
-
-		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
-		pr_cont("Outstanding acks: %lu\n",
-			(unsigned long) TIPC_SKB_CB(buf)->handle);
-
-		n_ptr = tipc_bclink_retransmit_to(net);
-
-		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_info("Broadcast link info for %s\n", addr_string);
-		pr_info("Reception permitted: %d,  Acked: %u\n",
-			n_ptr->bclink.recv_permitted,
-			n_ptr->bclink.acked);
-		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
-			n_ptr->bclink.last_in,
-			n_ptr->bclink.oos_state,
-			n_ptr->bclink.last_sent);
-
-		n_ptr->action_flags |= TIPC_BCAST_RESET;
-		l_ptr->stale_count = 0;
-	}
+	pr_warn("Retransmission failure on link <%s>\n", l->name);
+	link_print(l, "Resetting link ");
+	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+	pr_info("sqno %u, prev: %x, src: %x\n",
+		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
 }
 
-void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
-			  u32 retransmits)
-{
-	struct tipc_msg *msg;
-
-	if (!skb)
-		return;
-
-	msg = buf_msg(skb);
-
-	/* Detect repeated retransmit failures */
-	if (l_ptr->last_retransm == msg_seqno(msg)) {
-		if (++l_ptr->stale_count > 100) {
-			link_retransmit_failure(l_ptr, skb);
-			return;
-		}
-	} else {
-		l_ptr->last_retransm = msg_seqno(msg);
-		l_ptr->stale_count = 1;
-	}
-
-	skb_queue_walk_from(&l_ptr->transmq, skb) {
-		if (!retransmits)
-			break;
-		msg = buf_msg(skb);
-		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-				 l_ptr->media_addr);
-		retransmits--;
-		l_ptr->stats.retransmitted++;
-	}
-}
-
-static int tipc_link_retransm(struct tipc_link *l, int retransm,
-			      struct sk_buff_head *xmitq)
+int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
+		      struct sk_buff_head *xmitq)
 {
 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
 	struct tipc_msg *hdr;
+	u16 ack = l->rcv_nxt - 1;
+	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 
 	if (!skb)
 		return 0;
@@ -919,19 +829,25 @@
 		link_retransmit_failure(l, skb);
 		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 	}
+
+	/* Move forward to where retransmission should start */
 	skb_queue_walk(&l->transmq, skb) {
-		if (!retransm)
-			return 0;
+		if (!less(buf_seqno(skb), from))
+			break;
+	}
+
+	skb_queue_walk_from(&l->transmq, skb) {
+		if (more(buf_seqno(skb), to))
+			break;
 		hdr = buf_msg(skb);
 		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
 		if (!_skb)
 			return 0;
 		hdr = buf_msg(_skb);
-		msg_set_ack(hdr, l->rcv_nxt - 1);
-		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		msg_set_ack(hdr, ack);
+		msg_set_bcast_ack(hdr, bc_ack);
 		_skb->priority = TC_PRIO_CONTROL;
 		__skb_queue_tail(xmitq, _skb);
-		retransm--;
 		l->stats.retransmitted++;
 	}
 	return 0;
@@ -942,22 +858,20 @@
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
 			    struct sk_buff_head *inputq)
 {
-	struct tipc_node *node = link->owner;
-
 	switch (msg_user(buf_msg(skb))) {
 	case TIPC_LOW_IMPORTANCE:
 	case TIPC_MEDIUM_IMPORTANCE:
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		__skb_queue_tail(inputq, skb);
+		skb_queue_tail(inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
-		node->bclink.recv_permitted = true;
-		skb_queue_tail(link->namedq, skb);
+		l->bc_rcvlink->state = LINK_ESTABLISHED;
+		skb_queue_tail(l->namedq, skb);
 		return true;
 	case MSG_BUNDLER:
 	case TUNNEL_PROTOCOL:
@@ -978,10 +892,10 @@
 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
 			   struct sk_buff_head *inputq)
 {
-	struct tipc_node *node = l->owner;
 	struct tipc_msg *hdr = buf_msg(skb);
 	struct sk_buff **reasm_skb = &l->reasm_buf;
 	struct sk_buff *iskb;
+	struct sk_buff_head tmpq;
 	int usr = msg_user(hdr);
 	int rc = 0;
 	int pos = 0;
@@ -1006,23 +920,27 @@
 	}
 
 	if (usr == MSG_BUNDLER) {
+		skb_queue_head_init(&tmpq);
 		l->stats.recv_bundles++;
 		l->stats.recv_bundled += msg_msgcnt(hdr);
 		while (tipc_msg_extract(skb, &iskb, &pos))
-			tipc_data_input(l, iskb, inputq);
+			tipc_data_input(l, iskb, &tmpq);
+		tipc_skb_queue_splice_tail(&tmpq, inputq);
 		return 0;
 	} else if (usr == MSG_FRAGMENTER) {
 		l->stats.recv_fragments++;
 		if (tipc_buf_append(reasm_skb, &skb)) {
 			l->stats.recv_fragmented++;
 			tipc_data_input(l, skb, inputq);
-		} else if (!*reasm_skb) {
+		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
+			pr_warn_ratelimited("Unable to build fragment list\n");
 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 		}
 		return 0;
 	} else if (usr == BCAST_PROTOCOL) {
-		tipc_link_sync_rcv(node, skb);
-		return 0;
+		tipc_bcast_lock(l->net);
+		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
+		tipc_bcast_unlock(l->net);
 	}
 drop:
 	kfree_skb(skb);
@@ -1044,49 +962,95 @@
 	return released;
 }
 
+/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
+ *
+ * Note that sending of broadcast ack is coordinated among nodes, to reduce
+ * risk of ack storms towards the sender
+ */
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+	if (!l)
+		return 0;
+
+	/* Broadcast ACK must be sent via a unicast link => defer to caller */
+	if (link_is_bc_rcvlink(l)) {
+		if (((l->rcv_nxt ^ link_own_addr(l)) & 0xf) != 0xf)
+			return 0;
+		l->rcv_unacked = 0;
+		return TIPC_LINK_SND_BC_ACK;
+	}
+
+	/* Unicast ACK */
+	l->rcv_unacked = 0;
+	l->stats.sent_acks++;
+	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+	return 0;
+}
+
+/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
+ */
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+	int mtyp = RESET_MSG;
+
+	if (l->state == LINK_ESTABLISHING)
+		mtyp = ACTIVATE_MSG;
+
+	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+}
+
+/* tipc_link_build_nack_msg: prepare link nack message for transmission
+ */
+static void tipc_link_build_nack_msg(struct tipc_link *l,
+				     struct sk_buff_head *xmitq)
+{
+	u32 def_cnt = ++l->stats.deferred_recv;
+
+	if (link_is_bc_rcvlink(l))
+		return;
+
+	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
+		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+}
+
 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
- * @link: the link that should handle the message
+ * @l: the link that should handle the message
  * @skb: TIPC packet
  * @xmitq: queue to place packets to be sent after this call
  */
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		  struct sk_buff_head *xmitq)
 {
-	struct sk_buff_head *arrvq = &l->deferdq;
-	struct sk_buff_head tmpq;
+	struct sk_buff_head *defq = &l->deferdq;
 	struct tipc_msg *hdr;
-	u16 seqno, rcv_nxt;
+	u16 seqno, rcv_nxt, win_lim;
 	int rc = 0;
 
-	__skb_queue_head_init(&tmpq);
-
-	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
-		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
-			tipc_link_build_proto_msg(l, STATE_MSG, 0,
-						  0, 0, 0, xmitq);
-		return rc;
-	}
-
-	while ((skb = skb_peek(arrvq))) {
+	do {
 		hdr = buf_msg(skb);
+		seqno = msg_seqno(hdr);
+		rcv_nxt = l->rcv_nxt;
+		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
 
 		/* Verify and update link state */
-		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
-			__skb_dequeue(arrvq);
-			rc = tipc_link_proto_rcv(l, skb, xmitq);
-			continue;
-		}
+		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+			return tipc_link_proto_rcv(l, skb, xmitq);
 
 		if (unlikely(!link_is_up(l))) {
-			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-			if (!link_is_up(l)) {
-				kfree_skb(__skb_dequeue(arrvq));
-				goto exit;
-			}
+			if (l->state == LINK_ESTABLISHING)
+				rc = TIPC_LINK_UP_EVT;
+			goto drop;
 		}
 
+		/* Don't send probe at next timeout expiration */
 		l->silent_intv_cnt = 0;
 
+		/* Drop if outside receive window */
+		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
+			l->stats.duplicates++;
+			goto drop;
+		}
+
 		/* Forward queues and wake up waiting users */
 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
 			tipc_link_advance_backlog(l, xmitq);
@@ -1094,79 +1058,28 @@
 				link_prepare_wakeup(l);
 		}
 
-		/* Defer reception if there is a gap in the sequence */
-		seqno = msg_seqno(hdr);
-		rcv_nxt = l->rcv_nxt;
-		if (unlikely(less(rcv_nxt, seqno))) {
-			l->stats.deferred_recv++;
-			goto exit;
+		/* Defer delivery if sequence gap */
+		if (unlikely(seqno != rcv_nxt)) {
+			__tipc_skb_queue_sorted(defq, seqno, skb);
+			tipc_link_build_nack_msg(l, xmitq);
+			break;
 		}
 
-		__skb_dequeue(arrvq);
-
-		/* Drop if packet already received */
-		if (unlikely(more(rcv_nxt, seqno))) {
-			l->stats.duplicates++;
-			kfree_skb(skb);
-			goto exit;
-		}
-
-		/* Packet can be delivered */
+		/* Deliver packet */
 		l->rcv_nxt++;
 		l->stats.recv_info++;
-		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
-			rc = tipc_link_input(l, skb, &tmpq);
-
-		/* Ack at regular intervals */
-		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-			l->rcv_unacked = 0;
-			l->stats.sent_acks++;
-			tipc_link_build_proto_msg(l, STATE_MSG,
-						  0, 0, 0, 0, xmitq);
-		}
-	}
-exit:
-	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
-	return rc;
-}
-
-/**
- * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
- *
- * Returns increase in queue length (i.e. 0 or 1)
- */
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
-{
-	struct sk_buff *skb1;
-	u16 seq_no = buf_seqno(skb);
-
-	/* Empty queue ? */
-	if (skb_queue_empty(list)) {
-		__skb_queue_tail(list, skb);
-		return 1;
-	}
-
-	/* Last ? */
-	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
-		__skb_queue_tail(list, skb);
-		return 1;
-	}
-
-	/* Locate insertion point in queue, then insert; discard if duplicate */
-	skb_queue_walk(list, skb1) {
-		u16 curr_seqno = buf_seqno(skb1);
-
-		if (seq_no == curr_seqno) {
-			kfree_skb(skb);
-			return 0;
-		}
-
-		if (less(seq_no, curr_seqno))
+		if (!tipc_data_input(l, skb, l->inputq))
+			rc |= tipc_link_input(l, skb, l->inputq);
+		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
+			rc |= tipc_link_build_ack_msg(l, xmitq);
+		if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
 			break;
-	}
+	} while ((skb = __skb_dequeue(defq)));
 
-	__skb_queue_before(list, skb1, skb);
-	return 1;
+	return rc;
+drop:
+	kfree_skb(skb);
+	return rc;
 }
 
 /*
@@ -1184,23 +1097,17 @@
 	skb = __skb_dequeue(&xmitq);
 	if (!skb)
 		return;
-	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+	tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
 	l->rcv_unacked = 0;
-	kfree_skb(skb);
 }
 
-/* tipc_link_build_proto_msg: prepare link protocol message for transmission
- */
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
 				      u16 rcvgap, int tolerance, int priority,
 				      struct sk_buff_head *xmitq)
 {
 	struct sk_buff *skb = NULL;
 	struct tipc_msg *hdr = l->pmsg;
-	u16 snd_nxt = l->snd_nxt;
-	u16 rcv_nxt = l->rcv_nxt;
-	u16 rcv_last = rcv_nxt - 1;
-	int node_up = l->owner->bclink.recv_permitted;
+	bool node_up = link_is_up(l->bc_rcvlink);
 
 	/* Don't send protocol message during reset or link failover */
 	if (tipc_link_is_blocked(l))
@@ -1208,33 +1115,34 @@
 
 	msg_set_type(hdr, mtyp);
 	msg_set_net_plane(hdr, l->net_plane);
-	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
-	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+	msg_set_next_sent(hdr, l->snd_nxt);
+	msg_set_ack(hdr, l->rcv_nxt - 1);
+	msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
+	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
 	msg_set_link_tolerance(hdr, tolerance);
 	msg_set_linkprio(hdr, priority);
 	msg_set_redundant_link(hdr, node_up);
 	msg_set_seq_gap(hdr, 0);
 
 	/* Compatibility: created msg must not be in sequence with pkt flow */
-	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
 
 	if (mtyp == STATE_MSG) {
 		if (!tipc_link_is_up(l))
 			return;
-		msg_set_next_sent(hdr, snd_nxt);
 
 		/* Override rcvgap if there are packets in deferred queue */
 		if (!skb_queue_empty(&l->deferdq))
-			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - l->rcv_nxt;
 		if (rcvgap) {
 			msg_set_seq_gap(hdr, rcvgap);
 			l->stats.sent_nacks++;
 		}
-		msg_set_ack(hdr, rcv_last);
 		msg_set_probe(hdr, probe);
 		if (probe)
 			l->stats.sent_probes++;
 		l->stats.sent_states++;
+		l->rcv_unacked = 0;
 	} else {
 		/* RESET_MSG or ACTIVATE_MSG */
 		msg_set_max_pkt(hdr, l->advertised_mtu);
@@ -1250,7 +1158,7 @@
 }
 
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
- * with contents of the link's tranmsit and backlog queues.
+ * with contents of the link's transmit and backlog queues.
  */
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 			   int mtyp, struct sk_buff_head *xmitq)
@@ -1326,21 +1234,23 @@
 {
 	struct tipc_msg *hdr = buf_msg(skb);
 	u16 rcvgap = 0;
-	u16 nacked_gap = msg_seq_gap(hdr);
+	u16 ack = msg_ack(hdr);
+	u16 gap = msg_seq_gap(hdr);
 	u16 peers_snd_nxt =  msg_next_sent(hdr);
 	u16 peers_tol = msg_link_tolerance(hdr);
 	u16 peers_prio = msg_linkprio(hdr);
 	u16 rcv_nxt = l->rcv_nxt;
+	int mtyp = msg_type(hdr);
 	char *if_name;
 	int rc = 0;
 
-	if (tipc_link_is_blocked(l))
+	if (tipc_link_is_blocked(l) || !xmitq)
 		goto exit;
 
 	if (link_own_addr(l) > msg_prevnode(hdr))
 		l->net_plane = msg_net_plane(hdr);
 
-	switch (msg_type(hdr)) {
+	switch (mtyp) {
 	case RESET_MSG:
 
 		/* Ignore duplicate RESET with old session number */
@@ -1367,12 +1277,14 @@
 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
 			l->priority = peers_prio;
 
-		if (msg_type(hdr) == RESET_MSG) {
-			rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
-		} else if (!link_is_up(l)) {
-			tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
-			rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-		}
+		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+		if ((mtyp == RESET_MSG) || !link_is_up(l))
+			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+
+		/* ACTIVATE_MSG takes up link if it was already locally reset */
+		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+			rc = TIPC_LINK_UP_EVT;
+
 		l->peer_session = msg_session(hdr);
 		l->peer_bearer_id = msg_bearer_id(hdr);
 		if (l->mtu > msg_max_pkt(hdr))
@@ -1389,9 +1301,12 @@
 		l->stats.recv_states++;
 		if (msg_probe(hdr))
 			l->stats.recv_probes++;
-		rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
-		if (!link_is_up(l))
+
+		if (!link_is_up(l)) {
+			if (l->state == LINK_ESTABLISHING)
+				rc = TIPC_LINK_UP_EVT;
 			break;
+		}
 
 		/* Send NACK if peer has sent pkts we haven't received yet */
 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
@@ -1399,11 +1314,11 @@
 		if (rcvgap || (msg_probe(hdr)))
 			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
 						  0, 0, xmitq);
-		tipc_link_release_pkts(l, msg_ack(hdr));
+		tipc_link_release_pkts(l, ack);
 
 		/* If NACK, retransmit will now start at right position */
-		if (nacked_gap) {
-			rc = tipc_link_retransm(l, nacked_gap, xmitq);
+		if (gap) {
+			rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
 			l->stats.recv_nacks++;
 		}
 
@@ -1416,6 +1331,188 @@
 	return rc;
 }
 
+/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
+ */
+static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
+					 u16 peers_snd_nxt,
+					 struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb;
+	struct tipc_msg *hdr;
+	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
+	u16 ack = l->rcv_nxt - 1;
+	u16 gap_to = peers_snd_nxt - 1;
+
+	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+			      0, l->addr, link_own_addr(l), 0, 0, 0);
+	if (!skb)
+		return false;
+	hdr = buf_msg(skb);
+	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
+	msg_set_bcast_ack(hdr, ack);
+	msg_set_bcgap_after(hdr, ack);
+	if (dfrd_skb)
+		gap_to = buf_seqno(dfrd_skb) - 1;
+	msg_set_bcgap_to(hdr, gap_to);
+	msg_set_non_seq(hdr, bcast);
+	__skb_queue_tail(xmitq, skb);
+	return true;
+}
+
+/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+static void tipc_link_build_bc_init_msg(struct tipc_link *l,
+					struct sk_buff_head *xmitq)
+{
+	struct sk_buff_head list;
+
+	__skb_queue_head_init(&list);
+	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
+		return;
+	tipc_link_xmit(l, &list, xmitq);
+}
+
+/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
+ */
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
+{
+	int mtyp = msg_type(hdr);
+	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+	if (link_is_up(l))
+		return;
+
+	if (msg_user(hdr) == BCAST_PROTOCOL) {
+		l->rcv_nxt = peers_snd_nxt;
+		l->state = LINK_ESTABLISHED;
+		return;
+	}
+
+	if (l->peer_caps & TIPC_BCAST_SYNCH)
+		return;
+
+	if (msg_peer_node_is_up(hdr))
+		return;
+
+	/* Compatibility: accept older, less safe initial synch data */
+	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
+		l->rcv_nxt = peers_snd_nxt;
+}
+
+/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
+ */
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+			   struct sk_buff_head *xmitq)
+{
+	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
+
+	if (!link_is_up(l))
+		return;
+
+	if (!msg_peer_node_is_up(hdr))
+		return;
+
+	l->bc_peer_is_up = true;
+
+	/* Ignore if peers_snd_nxt goes beyond receive window */
+	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
+		return;
+
+	if (!more(peers_snd_nxt, l->rcv_nxt)) {
+		l->nack_state = BC_NACK_SND_CONDITIONAL;
+		return;
+	}
+
+	/* Don't NACK if one was recently sent or peeked */
+	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
+		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+		return;
+	}
+
+	/* Conditionally delay NACK sending until next synch rcv */
+	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
+		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
+		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
+			return;
+	}
+
+	/* Send NACK now but suppress next one */
+	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
+	l->nack_state = BC_NACK_SND_SUPPRESS;
+}
+
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+			  struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb, *tmp;
+	struct tipc_link *snd_l = l->bc_sndlink;
+
+	if (!link_is_up(l) || !l->bc_peer_is_up)
+		return;
+
+	if (!more(acked, l->acked))
+		return;
+
+	/* Skip over packets peer has already acked */
+	skb_queue_walk(&snd_l->transmq, skb) {
+		if (more(buf_seqno(skb), l->acked))
+			break;
+	}
+
+	/* Update/release the packets peer is acking now */
+	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
+		if (more(buf_seqno(skb), acked))
+			break;
+		if (!--TIPC_SKB_CB(skb)->ackers) {
+			__skb_unlink(skb, &snd_l->transmq);
+			kfree_skb(skb);
+		}
+	}
+	l->acked = acked;
+	tipc_link_advance_backlog(snd_l, xmitq);
+	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
+		link_prepare_wakeup(snd_l);
+}
+
+/* tipc_link_bc_nack_rcv(): receive broadcast nack message
+ */
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+			  struct sk_buff_head *xmitq)
+{
+	struct tipc_msg *hdr = buf_msg(skb);
+	u32 dnode = msg_destnode(hdr);
+	int mtyp = msg_type(hdr);
+	u16 acked = msg_bcast_ack(hdr);
+	u16 from = acked + 1;
+	u16 to = msg_bcgap_to(hdr);
+	u16 peers_snd_nxt = to + 1;
+	int rc = 0;
+
+	kfree_skb(skb);
+
+	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
+		return 0;
+
+	if (mtyp != STATE_MSG)
+		return 0;
+
+	if (dnode == link_own_addr(l)) {
+		tipc_link_bc_ack_rcv(l, acked, xmitq);
+		rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
+		l->stats.recv_nacks++;
+		return rc;
+	}
+
+	/* Msg for other node => suppress own NACK at next sync if applicable */
+	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
+		l->nack_state = BC_NACK_SND_SUPPRESS;
+
+	return 0;
+}
+
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 {
 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
@@ -1480,7 +1577,7 @@
 static void link_print(struct tipc_link *l, const char *str)
 {
 	struct sk_buff *hskb = skb_peek(&l->transmq);
-	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
 	u16 tail = l->snd_nxt - 1;
 
 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
@@ -1704,7 +1801,7 @@
 	if (tipc_link_is_up(link))
 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
 			goto attr_msg_full;
-	if (tipc_link_is_active(link))
+	if (link->active)
 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
 			goto attr_msg_full;
 
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 39ff8b6..66d859b 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -66,7 +66,8 @@
  */
 enum {
 	TIPC_LINK_UP_EVT       = 1,
-	TIPC_LINK_DOWN_EVT     = (1 << 1)
+	TIPC_LINK_DOWN_EVT     = (1 << 1),
+	TIPC_LINK_SND_BC_ACK   = (1 << 2)
 };
 
 /* Starting value for maximum packet size negotiation on unicast links
@@ -110,7 +111,7 @@
  * @name: link name character string
  * @media_addr: media address to use when sending messages over link
  * @timer: link timer
- * @owner: pointer to peer node
+ * @net: pointer to namespace struct
  * @refcnt: reference counter for permanent references (owner node & timer)
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
@@ -119,6 +120,7 @@
  * @keepalive_intv: link keepalive timer interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
+ * @peer_caps: bitmap describing capabilities of peer node
  * @silent_intv_cnt: # of timer intervals without any reception from peer
  * @proto_msg: template for control messages generated by link
  * @pmsg: convenience pointer to "proto_msg" field
@@ -134,6 +136,8 @@
  * @snt_nxt: next sequence number to use for outbound messages
  * @last_retransmitted: sequence number of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
+ * @ackers: # of peers that need to ack each packet before it can be released
+ * @acked: seqno of the last packet acked by a certain peer. Used for broadcast.
  * @rcv_nxt: next sequence number to expect for inbound messages
  * @deferred_queue: deferred queue saved OOS b'cast message received from node
  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
@@ -143,13 +147,14 @@
  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
  * @reasm_buf: head of partially reassembled inbound message fragments
+ * @bc_rcvr: marks that this is a broadcast receiver link
  * @stats: collects statistics regarding link activity
  */
 struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
 	struct tipc_media_addr *media_addr;
-	struct tipc_node *owner;
+	struct net *net;
 
 	/* Management and link supervision data */
 	u32 peer_session;
@@ -159,6 +164,8 @@
 	unsigned long keepalive_intv;
 	u32 abort_limit;
 	u32 state;
+	u16 peer_caps;
+	bool active;
 	u32 silent_intv_cnt;
 	struct {
 		unchar hdr[INT_H_SIZE];
@@ -185,7 +192,7 @@
 	} backlog[5];
 	u16 snd_nxt;
 	u16 last_retransm;
-	u32 window;
+	u16 window;
 	u32 stale_count;
 
 	/* Reception */
@@ -201,42 +208,50 @@
 	/* Fragmentation/reassembly */
 	struct sk_buff *reasm_buf;
 
+	/* Broadcast */
+	u16 ackers;
+	u16 acked;
+	struct tipc_link *bc_rcvlink;
+	struct tipc_link *bc_sndlink;
+	int nack_state;
+	bool bc_peer_is_up;
+
 	/* Statistics */
 	struct tipc_stats stats;
 };
 
-bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
-		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
-		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
+		      int tolerance, char net_plane, u32 mtu, int priority,
+		      int window, u32 session, u32 ownnode, u32 peer,
+		      u16 peer_caps,
+		      struct tipc_link *bc_sndlink,
+		      struct tipc_link *bc_rcvlink,
+		      struct sk_buff_head *inputq,
+		      struct sk_buff_head *namedq,
 		      struct tipc_link **link);
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
+			 int mtu, int window, u16 peer_caps,
+			 struct sk_buff_head *inputq,
+			 struct sk_buff_head *namedq,
+			 struct tipc_link *bc_sndlink,
+			 struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 			   int mtyp, struct sk_buff_head *xmitq);
-void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
-				    struct sk_buff_head *xmitq);
+void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_peer_is_down(struct tipc_link *l);
 bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_establishing(struct tipc_link *l);
 bool tipc_link_is_synching(struct tipc_link *l);
 bool tipc_link_is_failingover(struct tipc_link *l);
 bool tipc_link_is_blocked(struct tipc_link *l);
-int tipc_link_is_active(struct tipc_link *l_ptr);
-void tipc_link_purge_queues(struct tipc_link *l_ptr);
-void tipc_link_purge_backlog(struct tipc_link *l);
+void tipc_link_set_active(struct tipc_link *l, bool active);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int __tipc_link_xmit(struct net *net, struct tipc_link *link,
-		     struct sk_buff_head *list);
 int tipc_link_xmit(struct tipc_link *link,	struct sk_buff_head *list,
 		   struct sk_buff_head *xmitq);
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
-			  u32 gap, u32 tolerance, u32 priority);
-void tipc_link_push_packets(struct tipc_link *l_ptr);
-u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
-void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
-void tipc_link_retransmit(struct tipc_link *l_ptr,
-			  struct sk_buff *start, u32 retransmits);
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
-				    const struct sk_buff *skb);
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
 
 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
@@ -246,5 +261,23 @@
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
 		  struct sk_buff_head *xmitq);
-
+int tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
+void tipc_link_add_bc_peer(struct tipc_link *snd_l,
+			   struct tipc_link *uc_l,
+			   struct sk_buff_head *xmitq);
+void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
+			      struct tipc_link *rcv_l,
+			      struct sk_buff_head *xmitq);
+int tipc_link_bc_peers(struct tipc_link *l);
+void tipc_link_set_mtu(struct tipc_link *l, int mtu);
+int tipc_link_mtu(struct tipc_link *l);
+void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
+			  struct sk_buff_head *xmitq);
+void tipc_link_build_bc_sync_msg(struct tipc_link *l,
+				 struct sk_buff_head *xmitq);
+void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
+void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
+			   struct sk_buff_head *xmitq);
+int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
+			  struct sk_buff_head *xmitq);
 #endif
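
Illustration (not part of the patch): tipc_link_build_ack_msg() above staggers broadcast acks. A broadcast receive link only acks when the low four bits of rcv_nxt XOR the own node address are all ones, so only about one node in sixteen acks any given packet, which avoids ack storms towards the sender. A standalone C demo of that selection rule:

#include <stdio.h>
#include <stdint.h>

/* Ack only when the low nibble of (rcv_nxt ^ own address) is 0xf,
 * mirroring the test in tipc_link_build_ack_msg(). */
static int should_ack(uint16_t rcv_nxt, uint32_t own_addr)
{
	return ((rcv_nxt ^ own_addr) & 0xf) == 0xf;
}

int main(void)
{
	uint32_t node;
	uint16_t seqno;

	for (seqno = 100; seqno < 104; seqno++) {
		int ackers = 0;

		for (node = 0; node < 64; node++)
			if (should_ack(seqno, node))
				ackers++;
		printf("seqno %u: %d of 64 nodes ack\n", seqno, ackers);
	}
	return 0;
}
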
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index c5ac436..8740930 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -121,7 +121,7 @@
 {
 	struct sk_buff *head = *headbuf;
 	struct sk_buff *frag = *buf;
-	struct sk_buff *tail;
+	struct sk_buff *tail = NULL;
 	struct tipc_msg *msg;
 	u32 fragid;
 	int delta;
@@ -141,9 +141,15 @@
 		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
 			goto err;
 		head = *headbuf = frag;
-		skb_frag_list_init(head);
-		TIPC_SKB_CB(head)->tail = NULL;
 		*buf = NULL;
+		TIPC_SKB_CB(head)->tail = NULL;
+		if (skb_is_nonlinear(head)) {
+			skb_walk_frags(head, tail) {
+				TIPC_SKB_CB(head)->tail = tail;
+			}
+		} else {
+			skb_frag_list_init(head);
+		}
 		return 0;
 	}
 
@@ -176,7 +182,6 @@
 	*buf = NULL;
 	return 0;
 err:
-	pr_warn_ratelimited("Unable to build fragment list\n");
 	kfree_skb(*buf);
 	kfree_skb(*headbuf);
 	*buf = *headbuf = NULL;
@@ -559,18 +564,22 @@
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  *                         reassemble the clones into one message
  */
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
 {
-	struct sk_buff *skb;
+	struct sk_buff *skb, *_skb;
 	struct sk_buff *frag = NULL;
 	struct sk_buff *head = NULL;
-	int hdr_sz;
+	int hdr_len;
 
 	/* Copy header if single buffer */
 	if (skb_queue_len(list) == 1) {
 		skb = skb_peek(list);
-		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
-		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
+		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
+		if (!_skb)
+			return false;
+		__skb_queue_tail(rcvq, _skb);
+		return true;
 	}
 
 	/* Clone all fragments and reassemble */
@@ -584,9 +593,41 @@
 		if (!head)
 			goto error;
 	}
-	return frag;
+	__skb_queue_tail(rcvq, frag);
+	return true;
 error:
 	pr_warn("Failed do clone local mcast rcv buffer\n");
 	kfree_skb(head);
-	return NULL;
+	return false;
+}
+
+/* __tipc_skb_queue_sorted() - sort pkt into list according to sequence number
+ * @list: list to be appended to
+ * @seqno: sequence number of buffer to add
+ * @skb: buffer to add
+ */
+void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+			     struct sk_buff *skb)
+{
+	struct sk_buff *_skb, *tmp;
+
+	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
+		__skb_queue_head(list, skb);
+		return;
+	}
+
+	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
+		__skb_queue_tail(list, skb);
+		return;
+	}
+
+	skb_queue_walk_safe(list, _skb, tmp) {
+		if (more(seqno, buf_seqno(_skb)))
+			continue;
+		if (seqno == buf_seqno(_skb))
+			break;
+		__skb_queue_before(list, _skb, skb);
+		return;
+	}
+	kfree_skb(skb);
 }
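
The sorted insert added above depends on TIPC's mod-2^16 sequence-number
ordering: less()/more() compare u16 serial numbers across wraparound, so
65535 sorts before 2 once the counter has wrapped. A minimal userspace
sketch of that ordering; the helpers are assumptions modelled on
serial-number arithmetic, not the kernel's definitions:

    /* Wraparound-aware u16 comparison as relied on by
     * __tipc_skb_queue_sorted(); illustrative only.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int less(uint16_t left, uint16_t right)
    {
        /* "right" is ahead of "left" if the forward distance is 1..2^15-1 */
        uint16_t fwd = (uint16_t)(right - left);

        return fwd != 0 && fwd < 0x8000;
    }

    static int more(uint16_t left, uint16_t right)
    {
        return less(right, left);
    }

    int main(void)
    {
        printf("less(65535, 2) = %d\n", less(65535, 2)); /* 1: wrapped ahead */
        printf("less(2, 65535) = %d\n", less(2, 65535)); /* 0 */
        printf("more(2, 65535) = %d\n", more(2, 65535)); /* 1 */
        return 0;
    }

With this ordering the insertion loop can walk a queue whose sequence
numbers wrap through zero and still find the right slot, dropping exact
duplicates.
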
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a82c584..55778a0 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -112,6 +112,7 @@
 	bool wakeup_pending;
 	u16 chain_sz;
 	u16 chain_imp;
+	u16 ackers;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
@@ -357,7 +358,7 @@
 	if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
 		return usr;
 	if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
-		return msg_bits(m, 5, 13, 0x7);
+		return msg_bits(m, 9, 0, 0x7);
 	return TIPC_SYSTEM_IMPORTANCE;
 }
 
@@ -366,7 +367,7 @@
 	int usr = msg_user(m);
 
 	if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
-		msg_set_bits(m, 5, 13, 0x7, i);
+		msg_set_bits(m, 9, 0, 0x7, i);
 	else if (i < TIPC_SYSTEM_IMPORTANCE)
 		msg_set_user(m, i);
 	else
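
The two msg.h hunks above move the importance field of bundled/fragmented
messages from word 5, bit 13 to word 9, bit 0. msg_bits()/msg_set_bits()
address the header as 32-bit network-order words via a (word, position,
mask) triple; the sketch below shows that accessor pattern with a header
layout that is an assumption made only for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    struct tipc_msg { uint32_t hdr[15]; }; /* assumed size, enough for w == 9 */

    static uint32_t msg_bits(struct tipc_msg *m, int w, int pos, uint32_t mask)
    {
        return (ntohl(m->hdr[w]) >> pos) & mask;
    }

    static void msg_set_bits(struct tipc_msg *m, int w, int pos,
                             uint32_t mask, uint32_t val)
    {
        uint32_t word = ntohl(m->hdr[w]);

        word &= ~(mask << pos);          /* clear the old field */
        word |= (val & mask) << pos;     /* set the new value */
        m->hdr[w] = htonl(word);
    }

    int main(void)
    {
        struct tipc_msg m = { { 0 } };

        msg_set_bits(&m, 9, 0, 0x7, 5);           /* importance in word 9 */
        printf("%u\n", msg_bits(&m, 9, 0, 0x7));  /* prints 5 */
        return 0;
    }
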
@@ -600,6 +601,11 @@
 	return msg_bits(m, 4, 16, 0xffff);
 }
 
+static inline u32 msg_bc_snd_nxt(struct tipc_msg *m)
+{
+	return msg_last_bcast(m) + 1;
+}
+
 static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
 {
 	msg_set_bits(m, 4, 16, 0xffff, n);
@@ -789,7 +795,9 @@
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
-struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
+bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
+void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+			     struct sk_buff *skb);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
 {
@@ -862,38 +870,6 @@
 	return skb;
 }
 
-/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
- * @list: list to be appended to
- * @skb: buffer to add
- * Returns true if queue should treated further, otherwise false
- */
-static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
-					   struct sk_buff *skb)
-{
-	struct sk_buff *_skb, *tmp;
-	struct tipc_msg *hdr = buf_msg(skb);
-	u16 seqno = msg_seqno(hdr);
-
-	if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
-		__skb_queue_head(list, skb);
-		return true;
-	}
-	if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
-		__skb_queue_head(list, skb);
-		return true;
-	}
-	if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
-		skb_queue_walk_safe(list, _skb, tmp) {
-			if (likely(less(seqno, buf_seqno(_skb)))) {
-				__skb_queue_before(list, _skb, skb);
-				return true;
-			}
-		}
-	}
-	__skb_queue_tail(list, skb);
-	return false;
-}
-
 /* tipc_skb_queue_splice_tail - append an skb list to lock protected list
  * @list: the new list to append. Not lock protected
  * @head: target list. Lock protected.
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index e6018b7..c07612b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -102,7 +102,7 @@
 		if (!oskb)
 			break;
 		msg_set_destnode(buf_msg(oskb), dnode);
-		tipc_node_xmit_skb(net, oskb, dnode, dnode);
+		tipc_node_xmit_skb(net, oskb, dnode, 0);
 	}
 	rcu_read_unlock();
 
@@ -223,7 +223,7 @@
 			 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
 	rcu_read_unlock();
 
-	tipc_node_xmit(net, &head, dnode, dnode);
+	tipc_node_xmit(net, &head, dnode, 0);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d6d1399..77bf911 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -112,14 +112,11 @@
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	char addr_string[16];
-	int res;
 
 	tn->own_addr = addr;
 	tipc_named_reinit(net);
 	tipc_sk_reinit(net);
-	res = tipc_bclink_init(net);
-	if (res)
-		return res;
+	tipc_bcast_reinit(net);
 
 	tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
 			     TIPC_ZONE_SCOPE, 0, tn->own_addr);
@@ -142,7 +139,6 @@
 			      tn->own_addr);
 	rtnl_lock();
 	tipc_bearer_stop(net);
-	tipc_bclink_stop(net);
 	tipc_node_stop(net);
 	rtnl_unlock();
 
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 703875f..20cddec 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -72,7 +72,6 @@
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
 				bool delete);
 static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
-static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
 static void tipc_node_timeout(unsigned long data);
 static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
@@ -165,8 +164,10 @@
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->publ_list);
 	INIT_LIST_HEAD(&n_ptr->conn_sks);
-	skb_queue_head_init(&n_ptr->bclink.namedq);
-	__skb_queue_head_init(&n_ptr->bclink.deferdq);
+	skb_queue_head_init(&n_ptr->bc_entry.namedq);
+	skb_queue_head_init(&n_ptr->bc_entry.inputq1);
+	__skb_queue_head_init(&n_ptr->bc_entry.arrvq);
+	skb_queue_head_init(&n_ptr->bc_entry.inputq2);
 	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
 	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
@@ -177,6 +178,18 @@
 	n_ptr->signature = INVALID_NODE_SIG;
 	n_ptr->active_links[0] = INVALID_BEARER_ID;
 	n_ptr->active_links[1] = INVALID_BEARER_ID;
+	if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr,
+				 U16_MAX, tipc_bc_sndlink(net)->window,
+				 n_ptr->capabilities,
+				 &n_ptr->bc_entry.inputq1,
+				 &n_ptr->bc_entry.namedq,
+				 tipc_bc_sndlink(net),
+				 &n_ptr->bc_entry.link)) {
+		pr_warn("Broadcast rcv link creation failed, no memory\n");
+		kfree(n_ptr);
+		n_ptr = NULL;
+		goto exit;
+	}
 	tipc_node_get(n_ptr);
 	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
 	n_ptr->keepalive_intv = U32_MAX;
@@ -203,6 +216,7 @@
 {
 	list_del_rcu(&node->list);
 	hlist_del_rcu(&node->hash);
+	kfree(node->bc_entry.link);
 	kfree_rcu(node, rcu);
 }
 
@@ -317,7 +331,11 @@
 	struct tipc_link *ol = node_active_link(n, 0);
 	struct tipc_link *nl = n->links[bearer_id].link;
 
-	if (!nl || !tipc_link_is_up(nl))
+	if (!nl)
+		return;
+
+	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
+	if (!tipc_link_is_up(nl))
 		return;
 
 	n->working_links++;
@@ -328,6 +346,7 @@
 	n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
 
 	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
 
 	pr_debug("Established link <%s> on network plane %c\n",
 		 nl->name, nl->net_plane);
@@ -336,8 +355,9 @@
 	if (!ol) {
 		*slot0 = bearer_id;
 		*slot1 = bearer_id;
-		tipc_link_build_bcast_sync_msg(nl, xmitq);
-		node_established_contact(n);
+		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+		n->action_flags |= TIPC_NOTIFY_NODE_UP;
+		tipc_bcast_add_peer(n->net, nl, xmitq);
 		return;
 	}
 
@@ -346,8 +366,11 @@
 		pr_debug("Old link <%s> becomes standby\n", ol->name);
 		*slot0 = bearer_id;
 		*slot1 = bearer_id;
+		tipc_link_set_active(nl, true);
+		tipc_link_set_active(ol, false);
 	} else if (nl->priority == ol->priority) {
-		*slot0 = bearer_id;
+		tipc_link_set_active(nl, true);
+		*slot1 = bearer_id;
 	} else {
 		pr_debug("New link <%s> is standby\n", nl->name);
 	}
@@ -416,10 +439,18 @@
 	}
 
 	if (!tipc_node_is_up(n)) {
+		if (tipc_link_peer_is_down(l))
+			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
+		tipc_link_fsm_evt(l, LINK_RESET_EVT);
 		tipc_link_reset(l);
+		tipc_link_build_reset_msg(l, xmitq);
+		*maddr = &n->links[*bearer_id].maddr;
 		node_lost_contact(n, &le->inputq);
+		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
 		return;
 	}
+	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
 
 	/* There is still a working link => initiate failover */
 	tnl = node_active_link(n, 0);
@@ -428,6 +459,7 @@
 	n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
 	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
 	tipc_link_reset(l);
+	tipc_link_fsm_evt(l, LINK_RESET_EVT);
 	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
 	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
 	*maddr = &n->links[tnl->bearer_id].maddr;
@@ -437,20 +469,28 @@
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
 	struct tipc_link_entry *le = &n->links[bearer_id];
+	struct tipc_link *l = le->link;
 	struct tipc_media_addr *maddr;
 	struct sk_buff_head xmitq;
 
+	if (!l)
+		return;
+
 	__skb_queue_head_init(&xmitq);
 
 	tipc_node_lock(n);
-	__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
-	if (delete && le->link) {
-		kfree(le->link);
-		le->link = NULL;
-		n->link_cnt--;
+	if (!tipc_link_is_establishing(l)) {
+		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+		if (delete) {
+			kfree(l);
+			le->link = NULL;
+			n->link_cnt--;
+		}
+	} else {
+		/* Defuse pending tipc_node_link_up() */
+		tipc_link_fsm_evt(l, LINK_RESET_EVT);
 	}
 	tipc_node_unlock(n);
-
 	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
 	tipc_sk_rcv(n->net, &le->inputq);
 }
@@ -474,6 +514,7 @@
 	bool link_up = false;
 	bool accept_addr = false;
 	bool reset = true;
+	char *if_name;
 
 	*dupl_addr = false;
 	*respond = false;
@@ -560,13 +601,20 @@
 			pr_warn("Cannot establish 3rd link to %x\n", n->addr);
 			goto exit;
 		}
-		if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
-				      tipc_own_addr(net), onode, &le->maddr,
-				      &le->inputq, &n->bclink.namedq, &l)) {
+		if_name = strchr(b->name, ':') + 1;
+		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
+				      b->net_plane, b->mtu, b->priority,
+				      b->window, mod(tipc_net(net)->random),
+				      tipc_own_addr(net), onode,
+				      n->capabilities,
+				      tipc_bc_sndlink(n->net), n->bc_entry.link,
+				      &le->inputq,
+				      &n->bc_entry.namedq, &l)) {
 			*respond = false;
 			goto exit;
 		}
 		tipc_link_reset(l);
+		tipc_link_fsm_evt(l, LINK_RESET_EVT);
 		if (n->state == NODE_FAILINGOVER)
 			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
 		le->link = l;
@@ -579,7 +627,7 @@
 	memcpy(&le->maddr, maddr, sizeof(*maddr));
 exit:
 	tipc_node_unlock(n);
-	if (reset)
+	if (reset && !tipc_link_is_reset(l))
 		tipc_node_link_down(n, b->identity, false);
 	tipc_node_put(n);
 }
@@ -686,10 +734,10 @@
 			break;
 		case SELF_ESTABL_CONTACT_EVT:
 		case PEER_LOST_CONTACT_EVT:
-			break;
 		case NODE_SYNCH_END_EVT:
-		case NODE_SYNCH_BEGIN_EVT:
 		case NODE_FAILOVER_BEGIN_EVT:
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
 		case NODE_FAILOVER_END_EVT:
 		default:
 			goto illegal_evt;
@@ -804,61 +852,36 @@
 	return true;
 }
 
-static void node_established_contact(struct tipc_node *n_ptr)
-{
-	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
-	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
-	n_ptr->bclink.oos_state = 0;
-	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
-	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
-}
-
-static void node_lost_contact(struct tipc_node *n_ptr,
+static void node_lost_contact(struct tipc_node *n,
 			      struct sk_buff_head *inputq)
 {
 	char addr_string[16];
 	struct tipc_sock_conn *conn, *safe;
 	struct tipc_link *l;
-	struct list_head *conns = &n_ptr->conn_sks;
+	struct list_head *conns = &n->conn_sks;
 	struct sk_buff *skb;
-	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
 	uint i;
 
 	pr_debug("Lost contact with %s\n",
-		 tipc_addr_string_fill(addr_string, n_ptr->addr));
+		 tipc_addr_string_fill(addr_string, n->addr));
 
-	/* Flush broadcast link info associated with lost node */
-	if (n_ptr->bclink.recv_permitted) {
-		__skb_queue_purge(&n_ptr->bclink.deferdq);
-
-		if (n_ptr->bclink.reasm_buf) {
-			kfree_skb(n_ptr->bclink.reasm_buf);
-			n_ptr->bclink.reasm_buf = NULL;
-		}
-
-		tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
-		tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
-
-		n_ptr->bclink.recv_permitted = false;
-	}
+	/* Clean up broadcast state */
+	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
 
 	/* Abort any ongoing link failover */
 	for (i = 0; i < MAX_BEARERS; i++) {
-		l = n_ptr->links[i].link;
+		l = n->links[i].link;
 		if (l)
 			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
 	}
 
-	/* Prevent re-contact with node until cleanup is done */
-	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
-
 	/* Notify publications from this node */
-	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
+	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
 
 	/* Notify sockets connected to node */
 	list_for_each_entry_safe(conn, safe, conns, list) {
 		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
-				      SHORT_H_SIZE, 0, tn->own_addr,
+				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
 				      conn->peer_node, conn->port,
 				      conn->peer_port, TIPC_ERR_NO_NODE);
 		if (likely(skb))
@@ -920,18 +943,13 @@
 	publ_list = &node->publ_list;
 
 	node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
-				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
-				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-				TIPC_BCAST_RESET);
+				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
 
 	spin_unlock_bh(&node->lock);
 
 	if (flags & TIPC_NOTIFY_NODE_DOWN)
 		tipc_publ_notify(net, publ_list, addr);
 
-	if (flags & TIPC_WAKEUP_BCAST_USERS)
-		tipc_bclink_wakeup_users(net);
-
 	if (flags & TIPC_NOTIFY_NODE_UP)
 		tipc_named_node_up(net, addr);
 
@@ -943,11 +961,6 @@
 		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
 				      link_id, addr);
 
-	if (flags & TIPC_BCAST_MSG_EVT)
-		tipc_bclink_input(net);
-
-	if (flags & TIPC_BCAST_RESET)
-		tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -1063,6 +1076,67 @@
 }
 
 /**
+ * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer_id: id of bearer message arrived on
+ *
+ * Invoked with no locks held.
+ */
+static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
+{
+	int rc;
+	struct sk_buff_head xmitq;
+	struct tipc_bclink_entry *be;
+	struct tipc_link_entry *le;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int usr = msg_user(hdr);
+	u32 dnode = msg_destnode(hdr);
+	struct tipc_node *n;
+
+	__skb_queue_head_init(&xmitq);
+
+	/* If NACK for other node, let rcv link for that node peek into it */
+	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
+		n = tipc_node_find(net, dnode);
+	else
+		n = tipc_node_find(net, msg_prevnode(hdr));
+	if (!n) {
+		kfree_skb(skb);
+		return;
+	}
+	be = &n->bc_entry;
+	le = &n->links[bearer_id];
+
+	rc = tipc_bcast_rcv(net, be->link, skb);
+
+	/* Broadcast link reset may happen at reassembly failure */
+	if (rc & TIPC_LINK_DOWN_EVT)
+		tipc_node_reset_links(n);
+
+	/* Broadcast ACKs are sent on a unicast link */
+	if (rc & TIPC_LINK_SND_BC_ACK) {
+		tipc_node_lock(n);
+		tipc_link_build_ack_msg(le->link, &xmitq);
+		tipc_node_unlock(n);
+	}
+
+	if (!skb_queue_empty(&xmitq))
+		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+	/* Deliver. 'arrvq' is under inputq2's lock protection */
+	if (!skb_queue_empty(&be->inputq1)) {
+		spin_lock_bh(&be->inputq2.lock);
+		spin_lock_bh(&be->inputq1.lock);
+		skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+		spin_unlock_bh(&be->inputq1.lock);
+		spin_unlock_bh(&be->inputq2.lock);
+		tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
+	}
+	tipc_node_put(n);
+}
+
+/**
  * tipc_node_check_state - check and if necessary update node state
  * @skb: TIPC packet
  * @bearer_id: identity of bearer delivering the packet
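
The deliver step in tipc_node_bc_rcv() above splices inputq1 into arrvq
while holding inputq2's lock, so a consumer draining arrvq into inputq2
never observes the staging queue half-spliced. A userspace sketch of the
same two-lock splice; the queue type and helpers are illustrative
assumptions, not the kernel skb queue API:

    #include <pthread.h>
    #include <stdio.h>

    struct queue {
        pthread_mutex_t lock;
        int items[16];
        int len;
    };

    static struct queue inputq1 = { PTHREAD_MUTEX_INITIALIZER, { 0 }, 0 };
    /* arrvq's own lock is never taken: it is protected by inputq2.lock */
    static struct queue arrvq   = { PTHREAD_MUTEX_INITIALIZER, { 0 }, 0 };
    static struct queue inputq2 = { PTHREAD_MUTEX_INITIALIZER, { 0 }, 0 };

    static void splice_tail_init(struct queue *from, struct queue *to)
    {
        int i;

        for (i = 0; i < from->len; i++)
            to->items[to->len++] = from->items[i];
        from->len = 0;
    }

    static void deliver(void)
    {
        pthread_mutex_lock(&inputq2.lock);   /* outer: consumer queue */
        pthread_mutex_lock(&inputq1.lock);   /* inner: producer queue */
        splice_tail_init(&inputq1, &arrvq);
        pthread_mutex_unlock(&inputq1.lock);
        pthread_mutex_unlock(&inputq2.lock);
    }

    int main(void)
    {
        inputq1.items[inputq1.len++] = 42;
        deliver();
        printf("arrvq: len %d, head %d\n", arrvq.len, arrvq.items[0]);
        return 0;
    }
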
@@ -1116,7 +1190,7 @@
 	}
 
 	/* Ignore duplicate packets */
-	if (less(oseqno, rcv_nxt))
+	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
 		return true;
 
 	/* Initiate or update failover mode if applicable */
@@ -1146,8 +1220,8 @@
 	if (!pl || !tipc_link_is_up(pl))
 		return true;
 
-	/* Initiate or update synch mode if applicable */
-	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+	/* Initiate synch mode if applicable */
+	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
 		syncpt = iseqno + exp_pkts - 1;
 		if (!tipc_link_is_up(l)) {
 			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
@@ -1204,6 +1278,7 @@
 	int usr = msg_user(hdr);
 	int bearer_id = b->identity;
 	struct tipc_link_entry *le;
+	u16 bc_ack = msg_bcast_ack(hdr);
 	int rc = 0;
 
 	__skb_queue_head_init(&xmitq);
@@ -1212,13 +1287,12 @@
 	if (unlikely(!tipc_msg_validate(skb)))
 		goto discard;
 
-	/* Handle arrival of a non-unicast link packet */
+	/* Handle arrival of discovery or broadcast packet */
 	if (unlikely(msg_non_seq(hdr))) {
-		if (usr ==  LINK_CONFIG)
-			tipc_disc_rcv(net, skb, b);
+		if (unlikely(usr == LINK_CONFIG))
+			return tipc_disc_rcv(net, skb, b);
 		else
-			tipc_bclink_rcv(net, skb);
-		return;
+			return tipc_node_bc_rcv(net, skb, bearer_id);
 	}
 
 	/* Locate neighboring node that sent packet */
@@ -1227,19 +1301,18 @@
 		goto discard;
 	le = &n->links[bearer_id];
 
+	/* Ensure broadcast reception is in synch with peer's send state */
+	if (unlikely(usr == LINK_PROTOCOL))
+		tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
+	else if (unlikely(n->bc_entry.link->acked != bc_ack))
+		tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+
 	tipc_node_lock(n);
 
 	/* Is reception permitted at the moment ? */
 	if (!tipc_node_filter_pkt(n, hdr))
 		goto unlock;
 
-	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
-		tipc_bclink_sync_state(n, hdr);
-
-	/* Release acked broadcast packets */
-	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
-		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
-
 	/* Check and if necessary update node state */
 	if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
 		rc = tipc_link_rcv(le->link, skb, &xmitq);
@@ -1254,8 +1327,8 @@
 	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
 		tipc_node_link_down(n, bearer_id, false);
 
-	if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
-		tipc_named_rcv(net, &n->bclink.namedq);
+	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
+		tipc_named_rcv(net, &n->bc_entry.namedq);
 
 	if (!skb_queue_empty(&le->inputq))
 		tipc_sk_rcv(net, &le->inputq);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 344b3e7..6734562 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -55,36 +55,18 @@
 enum {
 	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
 	TIPC_NOTIFY_NODE_UP		= (1 << 4),
-	TIPC_WAKEUP_BCAST_USERS		= (1 << 5),
 	TIPC_NOTIFY_LINK_UP		= (1 << 6),
-	TIPC_NOTIFY_LINK_DOWN		= (1 << 7),
-	TIPC_BCAST_MSG_EVT		= (1 << 9),
-	TIPC_BCAST_RESET		= (1 << 10)
+	TIPC_NOTIFY_LINK_DOWN		= (1 << 7)
 };
 
-/**
- * struct tipc_node_bclink - TIPC node bclink structure
- * @acked: sequence # of last outbound b'cast message acknowledged by node
- * @last_in: sequence # of last in-sequence b'cast message received from node
- * @last_sent: sequence # of last b'cast message sent by node
- * @oos_state: state tracker for handling OOS b'cast messages
- * @deferred_queue: deferred queue saved OOS b'cast message received from node
- * @reasm_buf: broadcast reassembly queue head from node
- * @inputq_map: bitmap indicating which inqueues should be kicked
- * @recv_permitted: true if node is allowed to receive b'cast messages
+/* Optional capabilities supported by this code version
  */
-struct tipc_node_bclink {
-	u32 acked;
-	u32 last_in;
-	u32 last_sent;
-	u32 oos_state;
-	u32 deferred_size;
-	struct sk_buff_head deferdq;
-	struct sk_buff *reasm_buf;
-	struct sk_buff_head namedq;
-	bool recv_permitted;
+enum {
+	TIPC_BCAST_SYNCH = (1 << 1)
 };
 
+#define TIPC_NODE_CAPABILITIES TIPC_BCAST_SYNCH
+
 struct tipc_link_entry {
 	struct tipc_link *link;
 	u32 mtu;
@@ -92,6 +74,14 @@
 	struct tipc_media_addr maddr;
 };
 
+struct tipc_bclink_entry {
+	struct tipc_link *link;
+	struct sk_buff_head inputq1;
+	struct sk_buff_head arrvq;
+	struct sk_buff_head inputq2;
+	struct sk_buff_head namedq;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -104,7 +94,6 @@
  * @active_links: bearer ids of active links, used as index into links[] array
  * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
- * @bclink: broadcast-related info
  * @state: connectivity state vs peer node
  * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
@@ -124,8 +113,8 @@
 	struct hlist_node hash;
 	int active_links[2];
 	struct tipc_link_entry links[MAX_BEARERS];
+	struct tipc_bclink_entry bc_entry;
 	int action_flags;
-	struct tipc_node_bclink bclink;
 	struct list_head list;
 	int state;
 	u16 sync_point;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1060d52..552dbab 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -689,13 +689,13 @@
 	msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
 
 new_mtu:
-	mtu = tipc_bclink_get_mtu();
+	mtu = tipc_bcast_get_mtu(net);
 	rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
 	if (unlikely(rc < 0))
 		return rc;
 
 	do {
-		rc = tipc_bclink_xmit(net, pktchain);
+		rc = tipc_bcast_xmit(net, pktchain);
 		if (likely(!rc))
 			return dsz;
 
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index c170d31..816914e 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,6 +52,8 @@
 /* IANA assigned UDP port */
 #define UDP_PORT_DEFAULT	6118
 
+#define UDP_MIN_HEADROOM        28
+
 static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
 	[TIPC_NLA_UDP_UNSPEC]	= {.type = NLA_UNSPEC},
 	[TIPC_NLA_UDP_LOCAL]	= {.type = NLA_BINARY,
@@ -153,11 +155,12 @@
 	struct udp_bearer *ub;
 	struct udp_media_addr *dst = (struct udp_media_addr *)&dest->value;
 	struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value;
-	struct sk_buff *clone;
 	struct rtable *rt;
 
-	clone = skb_clone(skb, GFP_ATOMIC);
-	skb_set_inner_protocol(clone, htons(ETH_P_TIPC));
+	if (skb_headroom(skb) < UDP_MIN_HEADROOM)
+		pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC);
+
+	skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
 	ub = rcu_dereference_rtnl(b->media_ptr);
 	if (!ub) {
 		err = -ENODEV;
@@ -167,7 +170,7 @@
 		struct flowi4 fl = {
 			.daddr = dst->ipv4.s_addr,
 			.saddr = src->ipv4.s_addr,
-			.flowi4_mark = clone->mark,
+			.flowi4_mark = skb->mark,
 			.flowi4_proto = IPPROTO_UDP
 		};
 		rt = ip_route_output_key(net, &fl);
@@ -176,7 +179,7 @@
 			goto tx_error;
 		}
 		ttl = ip4_dst_hoplimit(&rt->dst);
-		err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, clone,
+		err = udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb,
 					  src->ipv4.s_addr,
 					  dst->ipv4.s_addr, 0, ttl, 0,
 					  src->udp_port, dst->udp_port,
@@ -199,7 +202,7 @@
 		if (err)
 			goto tx_error;
 		ttl = ip6_dst_hoplimit(ndst);
-		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, clone,
+		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
 					   ndst->dev, &src->ipv6,
 					   &dst->ipv6, 0, ttl, src->udp_port,
 					   dst->udp_port, false);
@@ -208,7 +211,7 @@
 	return err;
 
 tx_error:
-	kfree_skb(clone);
+	kfree_skb(skb);
 	return err;
 }
 
@@ -425,7 +428,6 @@
 	}
 	if (ub->ubsock)
 		sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
-	RCU_INIT_POINTER(b->media_ptr, NULL);
 	RCU_INIT_POINTER(ub->bearer, NULL);
 
 	/* sock_release need to be done outside of rtnl lock */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 03ee4d3..aaa0b58 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -326,9 +326,10 @@
 	return s;
 }
 
-static inline int unix_writable(struct sock *sk)
+static int unix_writable(const struct sock *sk)
 {
-	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+	return sk->sk_state != TCP_LISTEN &&
+	       (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
 }
 
 static void unix_write_space(struct sock *sk)
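
The reworked unix_writable() above now also rejects TCP_LISTEN sockets; the
pre-existing shift test means a socket counts as writable only while queued
write memory is at most a quarter of sk_sndbuf. A quick numeric check of
that predicate (the buffer sizes are made up for illustration):

    #include <stdio.h>

    static int writable(unsigned int wmem_alloc, int sndbuf)
    {
        return (int)(wmem_alloc << 2) <= sndbuf;
    }

    int main(void)
    {
        int sndbuf = 212992;  /* illustrative sk_sndbuf value */

        printf("%d\n", writable(53248, sndbuf)); /* 53248 * 4 == 212992 -> 1 */
        printf("%d\n", writable(53249, sndbuf)); /* just over a quarter -> 0 */
        return 0;
    }
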
@@ -2064,6 +2065,11 @@
 		goto out;
 	}
 
+	if (flags & MSG_PEEK)
+		skip = sk_peek_offset(sk, flags);
+	else
+		skip = 0;
+
 	do {
 		int chunk;
 		struct sk_buff *skb, *last;
@@ -2112,7 +2118,6 @@
 			break;
 		}
 
-		skip = sk_peek_offset(sk, flags);
 		while (skip >= unix_skb_len(skb)) {
 			skip -= unix_skb_len(skb);
 			last = skb;
@@ -2181,6 +2186,17 @@
 
 			sk_peek_offset_fwd(sk, chunk);
 
+			if (UNIXCB(skb).fp)
+				break;
+
+			skip = 0;
+			last = skb;
+			last_len = skb->len;
+			unix_state_lock(sk);
+			skb = skb_peek_next(skb, &sk->sk_receive_queue);
+			if (skb)
+				goto again;
+			unix_state_unlock(sk);
 			break;
 		}
 	} while (size);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index df5fc6b..00e8a34 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1948,13 +1948,13 @@
 	err = misc_register(&vsock_device);
 	if (err) {
 		pr_err("Failed to register misc device\n");
-		return -ENOENT;
+		goto err_reset_transport;
 	}
 
 	err = proto_register(&vsock_proto, 1);	/* we want our slab */
 	if (err) {
 		pr_err("Cannot register vsock protocol\n");
-		goto err_misc_deregister;
+		goto err_deregister_misc;
 	}
 
 	err = sock_register(&vsock_family_ops);
@@ -1969,8 +1969,9 @@
 
 err_unregister_proto:
 	proto_unregister(&vsock_proto);
-err_misc_deregister:
+err_deregister_misc:
 	misc_deregister(&vsock_device);
+err_reset_transport:
 	transport = NULL;
 err_busy:
 	mutex_unlock(&vsock_register_mutex);
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 1f63daf..7555cad 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -40,13 +40,11 @@
 
 static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
 static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-					  const struct vmci_event_data *ed,
-					  void *client_data);
 static void vmci_transport_peer_detach_cb(u32 sub_id,
 					  const struct vmci_event_data *ed,
 					  void *client_data);
 static void vmci_transport_recv_pkt_work(struct work_struct *work);
+static void vmci_transport_cleanup(struct work_struct *work);
 static int vmci_transport_recv_listen(struct sock *sk,
 				      struct vmci_transport_packet *pkt);
 static int vmci_transport_recv_connecting_server(
@@ -75,6 +73,10 @@
 	struct vmci_transport_packet pkt;
 };
 
+static LIST_HEAD(vmci_transport_cleanup_list);
+static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
+static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);
+
 static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
 							   VMCI_INVALID_ID };
 static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
@@ -791,44 +793,6 @@
 	return err;
 }
 
-static void vmci_transport_peer_attach_cb(u32 sub_id,
-					  const struct vmci_event_data *e_data,
-					  void *client_data)
-{
-	struct sock *sk = client_data;
-	const struct vmci_event_payload_qp *e_payload;
-	struct vsock_sock *vsk;
-
-	e_payload = vmci_event_data_const_payload(e_data);
-
-	vsk = vsock_sk(sk);
-
-	/* We don't ask for delayed CBs when we subscribe to this event (we
-	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
-	 * guarantees in that case about what context we might be running in,
-	 * so it could be BH or process, blockable or non-blockable.  So we
-	 * need to account for all possible contexts here.
-	 */
-	local_bh_disable();
-	bh_lock_sock(sk);
-
-	/* XXX This is lame, we should provide a way to lookup sockets by
-	 * qp_handle.
-	 */
-	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-				 e_payload->handle)) {
-		/* XXX This doesn't do anything, but in the future we may want
-		 * to set a flag here to verify the attach really did occur and
-		 * we weren't just sent a datagram claiming it was.
-		 */
-		goto out;
-	}
-
-out:
-	bh_unlock_sock(sk);
-	local_bh_enable();
-}
-
 static void vmci_transport_handle_detach(struct sock *sk)
 {
 	struct vsock_sock *vsk;
@@ -871,28 +835,38 @@
 					  const struct vmci_event_data *e_data,
 					  void *client_data)
 {
-	struct sock *sk = client_data;
+	struct vmci_transport *trans = client_data;
 	const struct vmci_event_payload_qp *e_payload;
-	struct vsock_sock *vsk;
 
 	e_payload = vmci_event_data_const_payload(e_data);
-	vsk = vsock_sk(sk);
-	if (vmci_handle_is_invalid(e_payload->handle))
-		return;
-
-	/* Same rules for locking as for peer_attach_cb(). */
-	local_bh_disable();
-	bh_lock_sock(sk);
 
 	/* XXX This is lame, we should provide a way to lookup sockets by
 	 * qp_handle.
 	 */
-	if (vmci_handle_is_equal(vmci_trans(vsk)->qp_handle,
-				 e_payload->handle))
-		vmci_transport_handle_detach(sk);
+	if (vmci_handle_is_invalid(e_payload->handle) ||
+	    vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+		return;
 
-	bh_unlock_sock(sk);
-	local_bh_enable();
+	/* We don't ask for delayed CBs when we subscribe to this event (we
+	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
+	 * guarantees in that case about what context we might be running in,
+	 * so it could be BH or process, blockable or non-blockable.  So we
+	 * need to account for all possible contexts here.
+	 */
+	spin_lock_bh(&trans->lock);
+	if (!trans->sk)
+		goto out;
+
+	/* Apart from here, trans->lock is only grabbed as part of sk destruct,
+	 * where trans->sk isn't locked.
+	 */
+	bh_lock_sock(trans->sk);
+
+	vmci_transport_handle_detach(trans->sk);
+
+	bh_unlock_sock(trans->sk);
+ out:
+	spin_unlock_bh(&trans->lock);
 }
 
 static void vmci_transport_qp_resumed_cb(u32 sub_id,
@@ -1181,7 +1155,7 @@
 	 */
 	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
 				   vmci_transport_peer_detach_cb,
-				   pending, &detach_sub_id);
+				   vmci_trans(vpending), &detach_sub_id);
 	if (err < VMCI_SUCCESS) {
 		vmci_transport_send_reset(pending, pkt);
 		err = vmci_transport_error_to_vsock_error(err);
@@ -1321,7 +1295,6 @@
 		    || vmci_trans(vsk)->qpair
 		    || vmci_trans(vsk)->produce_size != 0
 		    || vmci_trans(vsk)->consume_size != 0
-		    || vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID
 		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
 			skerr = EPROTO;
 			err = -EINVAL;
@@ -1389,7 +1362,6 @@
 	struct vsock_sock *vsk;
 	struct vmci_handle handle;
 	struct vmci_qp *qpair;
-	u32 attach_sub_id;
 	u32 detach_sub_id;
 	bool is_local;
 	u32 flags;
@@ -1399,7 +1371,6 @@
 
 	vsk = vsock_sk(sk);
 	handle = VMCI_INVALID_HANDLE;
-	attach_sub_id = VMCI_INVALID_ID;
 	detach_sub_id = VMCI_INVALID_ID;
 
 	/* If we have gotten here then we should be past the point where old
@@ -1444,23 +1415,15 @@
 		goto destroy;
 	}
 
-	/* Subscribe to attach and detach events first.
+	/* Subscribe to detach events first.
 	 *
 	 * XXX We attach once for each queue pair created for now so it is easy
 	 * to find the socket (it's provided), but later we should only
 	 * subscribe once and add a way to lookup sockets by queue pair handle.
 	 */
-	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
-				   vmci_transport_peer_attach_cb,
-				   sk, &attach_sub_id);
-	if (err < VMCI_SUCCESS) {
-		err = vmci_transport_error_to_vsock_error(err);
-		goto destroy;
-	}
-
 	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
 				   vmci_transport_peer_detach_cb,
-				   sk, &detach_sub_id);
+				   vmci_trans(vsk), &detach_sub_id);
 	if (err < VMCI_SUCCESS) {
 		err = vmci_transport_error_to_vsock_error(err);
 		goto destroy;
@@ -1496,7 +1459,6 @@
 	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
 		pkt->u.size;
 
-	vmci_trans(vsk)->attach_sub_id = attach_sub_id;
 	vmci_trans(vsk)->detach_sub_id = detach_sub_id;
 
 	vmci_trans(vsk)->notify_ops->process_negotiate(sk);
@@ -1504,9 +1466,6 @@
 	return 0;
 
 destroy:
-	if (attach_sub_id != VMCI_INVALID_ID)
-		vmci_event_unsubscribe(attach_sub_id);
-
 	if (detach_sub_id != VMCI_INVALID_ID)
 		vmci_event_unsubscribe(detach_sub_id);
 
@@ -1607,9 +1566,11 @@
 	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
 	vmci_trans(vsk)->qpair = NULL;
 	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
-	vmci_trans(vsk)->attach_sub_id = vmci_trans(vsk)->detach_sub_id =
-		VMCI_INVALID_ID;
+	vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
 	vmci_trans(vsk)->notify_ops = NULL;
+	INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
+	vmci_trans(vsk)->sk = &vsk->sk;
+	spin_lock_init(&vmci_trans(vsk)->lock);
 	if (psk) {
 		vmci_trans(vsk)->queue_pair_size =
 			vmci_trans(psk)->queue_pair_size;
@@ -1629,29 +1590,57 @@
 	return 0;
 }
 
+static void vmci_transport_free_resources(struct list_head *transport_list)
+{
+	while (!list_empty(transport_list)) {
+		struct vmci_transport *transport =
+		    list_first_entry(transport_list, struct vmci_transport,
+				     elem);
+		list_del(&transport->elem);
+
+		if (transport->detach_sub_id != VMCI_INVALID_ID) {
+			vmci_event_unsubscribe(transport->detach_sub_id);
+			transport->detach_sub_id = VMCI_INVALID_ID;
+		}
+
+		if (!vmci_handle_is_invalid(transport->qp_handle)) {
+			vmci_qpair_detach(&transport->qpair);
+			transport->qp_handle = VMCI_INVALID_HANDLE;
+			transport->produce_size = 0;
+			transport->consume_size = 0;
+		}
+
+		kfree(transport);
+	}
+}
+
+static void vmci_transport_cleanup(struct work_struct *work)
+{
+	LIST_HEAD(pending);
+
+	spin_lock_bh(&vmci_transport_cleanup_lock);
+	list_replace_init(&vmci_transport_cleanup_list, &pending);
+	spin_unlock_bh(&vmci_transport_cleanup_lock);
+	vmci_transport_free_resources(&pending);
+}
+
 static void vmci_transport_destruct(struct vsock_sock *vsk)
 {
-	if (vmci_trans(vsk)->attach_sub_id != VMCI_INVALID_ID) {
-		vmci_event_unsubscribe(vmci_trans(vsk)->attach_sub_id);
-		vmci_trans(vsk)->attach_sub_id = VMCI_INVALID_ID;
-	}
-
-	if (vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
-		vmci_event_unsubscribe(vmci_trans(vsk)->detach_sub_id);
-		vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
-	}
-
-	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
-		vmci_qpair_detach(&vmci_trans(vsk)->qpair);
-		vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
-		vmci_trans(vsk)->produce_size = 0;
-		vmci_trans(vsk)->consume_size = 0;
-	}
+	/* Ensure that the detach callback doesn't use the sk/vsk
+	 * we are about to destruct.
+	 */
+	spin_lock_bh(&vmci_trans(vsk)->lock);
+	vmci_trans(vsk)->sk = NULL;
+	spin_unlock_bh(&vmci_trans(vsk)->lock);
 
 	if (vmci_trans(vsk)->notify_ops)
 		vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
 
-	kfree(vsk->trans);
+	spin_lock_bh(&vmci_transport_cleanup_lock);
+	list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
+	spin_unlock_bh(&vmci_transport_cleanup_lock);
+	schedule_work(&vmci_transport_cleanup_work);
+
 	vsk->trans = NULL;
 }
 
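
The destruct path above no longer frees the transport inline: it queues it
on a spinlock-protected cleanup list and schedules a worker, and the worker
detaches the whole list under the lock (the list_replace_init() step) before
freeing entries with no locks held. A userspace sketch of this deferred-free
pattern; the types and the explicit cleanup_work() call are illustrative
assumptions in place of the kernel list and workqueue APIs:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct transport {
        struct transport *next;
        int id;
    };

    static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct transport *cleanup_list;

    static void transport_destruct(struct transport *t)
    {
        pthread_mutex_lock(&cleanup_lock);
        t->next = cleanup_list;          /* enqueue only; nothing freed here */
        cleanup_list = t;
        pthread_mutex_unlock(&cleanup_lock);
    }

    static void cleanup_work(void)
    {
        struct transport *pending;

        pthread_mutex_lock(&cleanup_lock);
        pending = cleanup_list;          /* detach the whole list at once */
        cleanup_list = NULL;
        pthread_mutex_unlock(&cleanup_lock);

        while (pending) {                /* release resources lock-free */
            struct transport *t = pending;

            pending = t->next;
            printf("freeing transport %d\n", t->id);
            free(t);
        }
    }

    int main(void)
    {
        struct transport *t = malloc(sizeof(*t));

        if (!t)
            return 1;
        t->id = 1;
        transport_destruct(t);
        cleanup_work();
        return 0;
    }
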
@@ -2146,6 +2135,9 @@
 
 static void __exit vmci_transport_exit(void)
 {
+	cancel_work_sync(&vmci_transport_cleanup_work);
+	vmci_transport_free_resources(&vmci_transport_cleanup_list);
+
 	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
 		if (vmci_datagram_destroy_handle(
 			vmci_transport_stream_handle) != VMCI_SUCCESS)
@@ -2164,6 +2156,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index ce6c962..2ad46f3 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -119,10 +119,12 @@
 	u64 queue_pair_size;
 	u64 queue_pair_min_size;
 	u64 queue_pair_max_size;
-	u32 attach_sub_id;
 	u32 detach_sub_id;
 	union vmci_transport_notify notify;
 	struct vmci_transport_notify_ops *notify_ops;
+	struct list_head elem;
+	struct sock *sk;
+	spinlock_t lock; /* protects sk. */
 };
 
 int vmci_transport_register(void);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4f5543d..da72ed3 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -174,6 +174,16 @@
 
 	  Most distributions have a CRDA package.  So if unsure, say N.
 
+config CFG80211_CRDA_SUPPORT
+	bool "support CRDA" if CFG80211_INTERNAL_REGDB
+	default y
+	depends on CFG80211
+	help
+	  You should enable this option unless you know for sure you have no
+	  need for it, for example when using internal regdb (above).
+
+	  If unsure, say Y.
+
 config CFG80211_WEXT
 	bool "cfg80211 wireless extensions compatibility" if !CFG80211_WEXT_EXPORT
 	depends on CFG80211
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3893409..b091551 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -419,6 +419,7 @@
 	device_initialize(&rdev->wiphy.dev);
 	rdev->wiphy.dev.class = &ieee80211_class;
 	rdev->wiphy.dev.platform_data = rdev;
+	device_enable_async_suspend(&rdev->wiphy.dev);
 
 	INIT_LIST_HEAD(&rdev->destroy_list);
 	spin_lock_init(&rdev->destroy_list_lock);
@@ -460,6 +461,9 @@
 
 	rdev->wiphy.max_num_csa_counters = 1;
 
+	rdev->wiphy.max_sched_scan_plans = 1;
+	rdev->wiphy.max_sched_scan_plan_interval = U32_MAX;
+
 	return &rdev->wiphy;
 }
 EXPORT_SYMBOL(wiphy_new_nm);
@@ -635,7 +639,7 @@
 		if (WARN_ON(!sband->n_channels))
 			return -EINVAL;
 		/*
-		 * on 60gHz band, there are no legacy rates, so
+		 * on 60GHz band, there are no legacy rates, so
 		 * n_bitrates is 0
 		 */
 		if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
diff --git a/net/wireless/core.h b/net/wireless/core.h
index b9d5bc8..a618b4b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -137,6 +137,7 @@
 	struct list_head list;
 	struct list_head hidden_list;
 	struct rb_node rbn;
+	u64 ts_boottime;
 	unsigned long ts;
 	unsigned long refcount;
 	atomic_t hold;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 5d8748b..d693c9d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3,6 +3,7 @@
  *
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright 2015	Intel Deutschland GmbH
  */
 
 #include <linux/if.h>
@@ -478,6 +479,12 @@
 	[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
 };
 
+static const struct nla_policy
+nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = {
+	[NL80211_SCHED_SCAN_PLAN_INTERVAL] = { .type = NLA_U32 },
+	[NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
+};
+
 static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
 				     struct netlink_callback *cb,
 				     struct cfg80211_registered_device **rdev,
@@ -1303,7 +1310,13 @@
 		    nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
 				rdev->wiphy.max_sched_scan_ie_len) ||
 		    nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
-			       rdev->wiphy.max_match_sets))
+			       rdev->wiphy.max_match_sets) ||
+		    nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+				rdev->wiphy.max_sched_scan_plans) ||
+		    nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+				rdev->wiphy.max_sched_scan_plan_interval) ||
+		    nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+				rdev->wiphy.max_sched_scan_plan_iterations))
 			goto nla_put_failure;
 
 		if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
@@ -2403,6 +2416,16 @@
 		}
 	}
 
+	if (rdev->ops->get_tx_power) {
+		int dbm, ret;
+
+		ret = rdev_get_tx_power(rdev, wdev, &dbm);
+		if (ret == 0 &&
+		    nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+				DBM_TO_MBM(dbm)))
+			goto nla_put_failure;
+	}
+
 	if (wdev->ssid_len) {
 		if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
 			goto nla_put_failure;
@@ -3998,7 +4021,8 @@
 		params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
 	}
 
-	if (statype != CFG80211_STA_TDLS_PEER_SETUP) {
+	if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		/* reject other things that can't change */
 		if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD)
 			return -EINVAL;
@@ -4010,7 +4034,8 @@
 			return -EINVAL;
 	}
 
-	if (statype != CFG80211_STA_AP_CLIENT) {
+	if (statype != CFG80211_STA_AP_CLIENT &&
+	    statype != CFG80211_STA_AP_CLIENT_UNASSOC) {
 		if (params->vlan)
 			return -EINVAL;
 	}
@@ -4022,6 +4047,7 @@
 			return -EOPNOTSUPP;
 		break;
 	case CFG80211_STA_AP_CLIENT:
+	case CFG80211_STA_AP_CLIENT_UNASSOC:
 		/* accept only the listed bits */
 		if (params->sta_flags_mask &
 				~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
@@ -4918,56 +4944,6 @@
 	return err;
 }
 
-static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
-	[NL80211_ATTR_REG_RULE_FLAGS]		= { .type = NLA_U32 },
-	[NL80211_ATTR_FREQ_RANGE_START]		= { .type = NLA_U32 },
-	[NL80211_ATTR_FREQ_RANGE_END]		= { .type = NLA_U32 },
-	[NL80211_ATTR_FREQ_RANGE_MAX_BW]	= { .type = NLA_U32 },
-	[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]	= { .type = NLA_U32 },
-	[NL80211_ATTR_POWER_RULE_MAX_EIRP]	= { .type = NLA_U32 },
-	[NL80211_ATTR_DFS_CAC_TIME]		= { .type = NLA_U32 },
-};
-
-static int parse_reg_rule(struct nlattr *tb[],
-	struct ieee80211_reg_rule *reg_rule)
-{
-	struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
-	struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
-
-	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
-		return -EINVAL;
-	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
-		return -EINVAL;
-	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
-		return -EINVAL;
-	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
-		return -EINVAL;
-	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
-		return -EINVAL;
-
-	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
-
-	freq_range->start_freq_khz =
-		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
-	freq_range->end_freq_khz =
-		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
-	freq_range->max_bandwidth_khz =
-		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
-
-	power_rule->max_eirp =
-		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
-
-	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
-		power_rule->max_antenna_gain =
-			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
-
-	if (tb[NL80211_ATTR_DFS_CAC_TIME])
-		reg_rule->dfs_cac_ms =
-			nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
-
-	return 0;
-}
-
 static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
 	char *data = NULL;
@@ -5599,6 +5575,57 @@
 	return err;
 }
 
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+	[NL80211_ATTR_REG_RULE_FLAGS]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_START]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_END]		= { .type = NLA_U32 },
+	[NL80211_ATTR_FREQ_RANGE_MAX_BW]	= { .type = NLA_U32 },
+	[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]	= { .type = NLA_U32 },
+	[NL80211_ATTR_POWER_RULE_MAX_EIRP]	= { .type = NLA_U32 },
+	[NL80211_ATTR_DFS_CAC_TIME]		= { .type = NLA_U32 },
+};
+
+static int parse_reg_rule(struct nlattr *tb[],
+	struct ieee80211_reg_rule *reg_rule)
+{
+	struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
+	struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;
+
+	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+		return -EINVAL;
+	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
+		return -EINVAL;
+
+	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
+
+	freq_range->start_freq_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
+	freq_range->end_freq_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
+	freq_range->max_bandwidth_khz =
+		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+
+	power_rule->max_eirp =
+		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
+
+	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
+		power_rule->max_antenna_gain =
+			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
+
+	if (tb[NL80211_ATTR_DFS_CAC_TIME])
+		reg_rule->dfs_cac_ms =
+			nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
+
+	return 0;
+}
+
 static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
@@ -5675,6 +5702,7 @@
 	kfree(rd);
 	return r;
 }
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
 
 static int validate_scan_freqs(struct nlattr *freqs)
 {
@@ -5960,14 +5988,100 @@
 	return err;
 }
 
+static int
+nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
+			       struct cfg80211_sched_scan_request *request,
+			       struct nlattr **attrs)
+{
+	int tmp, err, i = 0;
+	struct nlattr *attr;
+
+	if (!attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+		u32 interval;
+
+		/*
+		 * If scan plans are not specified,
+		 * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
+		 * case one scan plan will be set with the specified scan
+		 * interval and an infinite number of iterations.
+		 */
+		if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+			return -EINVAL;
+
+		interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+		if (!interval)
+			return -EINVAL;
+
+		request->scan_plans[0].interval =
+			DIV_ROUND_UP(interval, MSEC_PER_SEC);
+		if (!request->scan_plans[0].interval)
+			return -EINVAL;
+
+		if (request->scan_plans[0].interval >
+		    wiphy->max_sched_scan_plan_interval)
+			request->scan_plans[0].interval =
+				wiphy->max_sched_scan_plan_interval;
+
+		return 0;
+	}
+
+	nla_for_each_nested(attr, attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp) {
+		struct nlattr *plan[NL80211_SCHED_SCAN_PLAN_MAX + 1];
+
+		if (WARN_ON(i >= n_plans))
+			return -EINVAL;
+
+		err = nla_parse(plan, NL80211_SCHED_SCAN_PLAN_MAX,
+				nla_data(attr), nla_len(attr),
+				nl80211_plan_policy);
+		if (err)
+			return err;
+
+		if (!plan[NL80211_SCHED_SCAN_PLAN_INTERVAL])
+			return -EINVAL;
+
+		request->scan_plans[i].interval =
+			nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_INTERVAL]);
+		if (!request->scan_plans[i].interval ||
+		    request->scan_plans[i].interval >
+		    wiphy->max_sched_scan_plan_interval)
+			return -EINVAL;
+
+		if (plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]) {
+			request->scan_plans[i].iterations =
+				nla_get_u32(plan[NL80211_SCHED_SCAN_PLAN_ITERATIONS]);
+			if (!request->scan_plans[i].iterations ||
+			    (request->scan_plans[i].iterations >
+			     wiphy->max_sched_scan_plan_iterations))
+				return -EINVAL;
+		} else if (i < n_plans - 1) {
+			/*
+			 * All scan plans but the last one must specify
+			 * a finite number of iterations
+			 */
+			return -EINVAL;
+		}
+
+		i++;
+	}
+
+	/*
+	 * The last scan plan must not specify the number of
+	 * iterations; it is supposed to run infinitely
+	 */
+	if (request->scan_plans[n_plans - 1].iterations)
+		return -EINVAL;
+
+	return 0;
+}
+
 static struct cfg80211_sched_scan_request *
 nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
 			 struct nlattr **attrs)
 {
 	struct cfg80211_sched_scan_request *request;
 	struct nlattr *attr;
-	int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
-	u32 interval;
+	int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i, n_plans = 0;
 	enum ieee80211_band band;
 	size_t ie_len;
 	struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
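
When userspace still sends only the legacy NL80211_ATTR_SCHED_SCAN_INTERVAL,
nl80211_parse_sched_scan_plans() above converts it into a single plan whose
interval is rounded up from milliseconds to whole seconds and clamped to the
wiphy maximum. A worked example of just that conversion (the constants and
the plan_interval() helper are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define MSEC_PER_SEC 1000u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static uint32_t plan_interval(uint32_t interval_ms, uint32_t max_interval)
    {
        uint32_t secs = DIV_ROUND_UP(interval_ms, MSEC_PER_SEC);

        if (secs > max_interval)
            secs = max_interval;  /* clamp rather than reject */
        return secs;              /* 0 would have been rejected earlier */
    }

    int main(void)
    {
        printf("%u\n", plan_interval(1500, 3600));    /* 1.5 s rounds up to 2 */
        printf("%u\n", plan_interval(7200000, 3600)); /* 7200 s clamps to 3600 */
        return 0;
    }
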
@@ -5976,13 +6090,6 @@
 	if (!is_valid_ie_attr(attrs[NL80211_ATTR_IE]))
 		return ERR_PTR(-EINVAL);
 
-	if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
-		return ERR_PTR(-EINVAL);
-
-	interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
-	if (interval == 0)
-		return ERR_PTR(-EINVAL);
-
 	if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
 		n_channels = validate_scan_freqs(
 				attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
@@ -6046,9 +6153,37 @@
 	if (ie_len > wiphy->max_sched_scan_ie_len)
 		return ERR_PTR(-EINVAL);
 
+	if (attrs[NL80211_ATTR_SCHED_SCAN_PLANS]) {
+		/*
+		 * NL80211_ATTR_SCHED_SCAN_INTERVAL must not be specified since
+		 * each scan plan already specifies its own interval
+		 */
+		if (attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+			return ERR_PTR(-EINVAL);
+
+		nla_for_each_nested(attr,
+				    attrs[NL80211_ATTR_SCHED_SCAN_PLANS], tmp)
+			n_plans++;
+	} else {
+		/*
+		 * The scan interval attribute is kept for backward
+		 * compatibility. If no scan plans are specified and sched scan
+		 * interval is specified, one scan plan will be set with this
+		 * scan interval and an infinite number of iterations.
+		 */
+		if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+			return ERR_PTR(-EINVAL);
+
+		n_plans = 1;
+	}
+
+	if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
+		return ERR_PTR(-EINVAL);
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->match_sets) * n_match_sets
+			+ sizeof(*request->scan_plans) * n_plans
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
 	if (!request)
@@ -6076,6 +6211,18 @@
 	}
 	request->n_match_sets = n_match_sets;
 
+	if (n_match_sets)
+		request->scan_plans = (void *)(request->match_sets +
+					       n_match_sets);
+	else if (request->ie)
+		request->scan_plans = (void *)(request->ie + ie_len);
+	else if (n_ssids)
+		request->scan_plans = (void *)(request->ssids + n_ssids);
+	else
+		request->scan_plans = (void *)(request->channels + n_channels);
+
+	request->n_scan_plans = n_plans;
+
 	i = 0;
 	if (attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
 		/* user specified, bail out if channel not found */
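
The pointer arithmetic above carves several variable-length arrays out of
the single kzalloc() made a few lines earlier: each array pointer starts
where the previous region ends, so one kfree() releases the request and all
of its arrays. A simplified sketch of the layout technique, with made-up
field names rather than the cfg80211 structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct ssid { char data[32]; };
    struct scan_plan { unsigned int interval, iterations; };

    struct request {
        struct ssid *ssids;
        struct scan_plan *scan_plans;
        int n_ssids, n_scan_plans;
        /* the arrays live in this same allocation, right after the struct */
    };

    static struct request *request_alloc(int n_ssids, int n_plans)
    {
        struct request *req;

        req = calloc(1, sizeof(*req) +
                        n_ssids * sizeof(*req->ssids) +
                        n_plans * sizeof(*req->scan_plans));
        if (!req)
            return NULL;

        req->ssids = (void *)(req + 1);                   /* first array */
        req->scan_plans = (void *)(req->ssids + n_ssids); /* then the plans */
        req->n_ssids = n_ssids;
        req->n_scan_plans = n_plans;
        return req;
    }

    int main(void)
    {
        struct request *req = request_alloc(2, 3);

        if (!req)
            return 1;
        req->scan_plans[2].interval = 60;
        printf("plan interval %u\n", req->scan_plans[2].interval);
        free(req);  /* one free releases everything */
        return 0;
    }
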
@@ -6238,7 +6385,10 @@
 		request->delay =
 			nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
 
-	request->interval = interval;
+	err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
+	if (err)
+		goto out_free;
+
 	request->scan_start = jiffies;
 
 	return request;
@@ -6591,6 +6741,11 @@
 			jiffies_to_msecs(jiffies - intbss->ts)))
 		goto nla_put_failure;
 
+	if (intbss->ts_boottime &&
+	    nla_put_u64(msg, NL80211_BSS_LAST_SEEN_BOOTTIME,
+			intbss->ts_boottime))
+		goto nla_put_failure;
+
 	switch (rdev->wiphy.signal_type) {
 	case CFG80211_SIGNAL_TYPE_MBM:
 		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@ -8831,7 +8986,7 @@
 static int nl80211_send_wowlan_nd(struct sk_buff *msg,
 				  struct cfg80211_sched_scan_request *req)
 {
-	struct nlattr *nd, *freqs, *matches, *match;
+	struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan;
 	int i;
 
 	if (!req)
@@ -8841,7 +8996,9 @@
 	if (!nd)
 		return -ENOBUFS;
 
-	if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL, req->interval))
+	if (req->n_scan_plans == 1 &&
+	    nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_INTERVAL,
+			req->scan_plans[0].interval * 1000))
 		return -ENOBUFS;
 
 	if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
@@ -8868,6 +9025,23 @@
 		nla_nest_end(msg, matches);
 	}
 
+	scan_plans = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_PLANS);
+	if (!scan_plans)
+		return -ENOBUFS;
+
+	for (i = 0; i < req->n_scan_plans; i++) {
+		scan_plan = nla_nest_start(msg, i + 1);
+		if (!scan_plan ||
+		    nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
+				req->scan_plans[i].interval) ||
+		    (req->scan_plans[i].iterations &&
+		     nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_ITERATIONS,
+				 req->scan_plans[i].iterations)))
+			return -ENOBUFS;
+		nla_nest_end(msg, scan_plan);
+	}
+	nla_nest_end(msg, scan_plans);
+
 	nla_nest_end(msg, nd);
 
 	return 0;
@@ -9938,6 +10112,9 @@
 				if (!wdev->netdev && !wdev->p2p_started)
 					return -ENETDOWN;
 			}
+
+			if (!vcmd->doit)
+				return -EOPNOTSUPP;
 		} else {
 			wdev = NULL;
 		}
@@ -9957,6 +10134,193 @@
 	return -EOPNOTSUPP;
 }
 
+static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
+				       struct netlink_callback *cb,
+				       struct cfg80211_registered_device **rdev,
+				       struct wireless_dev **wdev)
+{
+	u32 vid, subcmd;
+	unsigned int i;
+	int vcmd_idx = -1;
+	int err;
+	void *data = NULL;
+	unsigned int data_len = 0;
+
+	rtnl_lock();
+
+	if (cb->args[0]) {
+		/* subtract the 1 again here */
+		struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
+		struct wireless_dev *tmp;
+
+		if (!wiphy) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+		*rdev = wiphy_to_rdev(wiphy);
+		*wdev = NULL;
+
+		if (cb->args[1]) {
+			list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+				if (tmp->identifier == cb->args[1] - 1) {
+					*wdev = tmp;
+					break;
+				}
+			}
+		}
+
+		/* keep rtnl locked in successful case */
+		return 0;
+	}
+
+	err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+			  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+			  nl80211_policy);
+	if (err)
+		goto out_unlock;
+
+	if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
+	    !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
+		err = -EINVAL;
+		goto out_unlock;
+	}
+
+	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*wdev))
+		*wdev = NULL;
+
+	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+					   nl80211_fam.attrbuf);
+	if (IS_ERR(*rdev)) {
+		err = PTR_ERR(*rdev);
+		goto out_unlock;
+	}
+
+	vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
+	subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
+
+	for (i = 0; i < (*rdev)->wiphy.n_vendor_commands; i++) {
+		const struct wiphy_vendor_command *vcmd;
+
+		vcmd = &(*rdev)->wiphy.vendor_commands[i];
+
+		if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
+			continue;
+
+		if (!vcmd->dumpit) {
+			err = -EOPNOTSUPP;
+			goto out_unlock;
+		}
+
+		vcmd_idx = i;
+		break;
+	}
+
+	if (vcmd_idx < 0) {
+		err = -EOPNOTSUPP;
+		goto out_unlock;
+	}
+
+	if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
+		data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+		data_len = nla_len(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
+	}
+
+	/* 0 is the first index - add 1 to parse only once */
+	cb->args[0] = (*rdev)->wiphy_idx + 1;
+	/* add 1 to know if it was NULL */
+	cb->args[1] = *wdev ? (*wdev)->identifier + 1 : 0;
+	cb->args[2] = vcmd_idx;
+	cb->args[3] = (unsigned long)data;
+	cb->args[4] = data_len;
+
+	/* keep rtnl locked in successful case */
+	return 0;
+ out_unlock:
+	rtnl_unlock();
+	return err;
+}
+
+static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
+				   struct netlink_callback *cb)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	unsigned int vcmd_idx;
+	const struct wiphy_vendor_command *vcmd;
+	void *data;
+	int data_len;
+	int err;
+	struct nlattr *vendor_data;
+
+	err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
+	if (err)
+		return err;
+
+	vcmd_idx = cb->args[2];
+	data = (void *)cb->args[3];
+	data_len = cb->args[4];
+	vcmd = &rdev->wiphy.vendor_commands[vcmd_idx];
+
+	if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
+			   WIPHY_VENDOR_CMD_NEED_NETDEV)) {
+		if (!wdev)
+			return -EINVAL;
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
+		    !wdev->netdev)
+			return -EINVAL;
+
+		if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
+			if (wdev->netdev &&
+			    !netif_running(wdev->netdev))
+				return -ENETDOWN;
+			if (!wdev->netdev && !wdev->p2p_started)
+				return -ENETDOWN;
+		}
+	}
+
+	while (1) {
+		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).portid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   NL80211_CMD_VENDOR);
+		if (!hdr)
+			break;
+
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+		    (wdev && nla_put_u64(skb, NL80211_ATTR_WDEV,
+					 wdev_id(wdev)))) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		vendor_data = nla_nest_start(skb, NL80211_ATTR_VENDOR_DATA);
+		if (!vendor_data) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		err = vcmd->dumpit(&rdev->wiphy, wdev, skb, data, data_len,
+				   (unsigned long *)&cb->args[5]);
+		nla_nest_end(skb, vendor_data);
+
+		if (err == -ENOBUFS || err == -ENOENT) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		} else if (err) {
+			genlmsg_cancel(skb, hdr);
+			goto out;
+		}
+
+		genlmsg_end(skb, hdr);
+	}
+
+	err = skb->len;
+ out:
+	rtnl_unlock();
+	return err;
+}
+
 struct sk_buff *__cfg80211_alloc_reply_skb(struct wiphy *wiphy,
 					   enum nl80211_commands cmd,
 					   enum nl80211_attrs attr,
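
nl80211_prepare_vendor_dump() above parses attributes only on the first pass
of a dump and stashes what it found in cb->args[], storing indices off by
one so that "resumed at index 0" is distinguishable from "not parsed yet".
A small sketch of that +1 cookie continuation pattern; the cb_state struct
and dump_chunk() loop are invented stand-ins for the netlink dump machinery:

    #include <stdio.h>

    struct cb_state { unsigned long args[6]; };

    static int dump_chunk(struct cb_state *cb, const int *items, int n_items)
    {
        int i = cb->args[0] ? (int)(cb->args[0] - 1) : 0; /* resume point */
        int emitted = 0;

        for (; i < n_items && emitted < 2; i++, emitted++)
            printf("item %d\n", items[i]);

        cb->args[0] = i + 1;  /* +1 so a fresh dump (all zeros) is distinct */
        return i < n_items;   /* more to come? */
    }

    int main(void)
    {
        struct cb_state cb = { { 0 } };
        int items[] = { 10, 20, 30, 40, 50 };

        while (dump_chunk(&cb, items, 5))
            ;  /* each call emits at most two items, like one dump skb */
        return 0;
    }
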
@@ -10533,6 +10897,7 @@
 		.internal_flags = NL80211_FLAG_NEED_RTNL,
 		/* can be retrieved by unprivileged users */
 	},
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
 	{
 		.cmd = NL80211_CMD_SET_REG,
 		.doit = nl80211_set_reg,
@@ -10540,6 +10905,7 @@
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_RTNL,
 	},
+#endif
 	{
 		.cmd = NL80211_CMD_REQ_SET_REG,
 		.doit = nl80211_req_set_reg,
@@ -10994,6 +11360,7 @@
 	{
 		.cmd = NL80211_CMD_VENDOR,
 		.doit = nl80211_vendor_cmd,
+		.dumpit = nl80211_vendor_cmd_dump,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2510b231..2e8d6f3 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -135,10 +135,7 @@
 /* Used to track the userspace process controlling the indoor setting */
 static u32 reg_is_indoor_portid;
 
-/* Max number of consecutive attempts to communicate with CRDA  */
-#define REG_MAX_CRDA_TIMEOUTS 10
-
-static u32 reg_crda_timeouts;
+static void restore_regulatory_settings(bool reset_user);
 
 static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
 {
@@ -226,9 +223,6 @@
 static void reg_todo(struct work_struct *work);
 static DECLARE_WORK(reg_work, reg_todo);
 
-static void reg_timeout_work(struct work_struct *work);
-static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
-
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
 	.n_reg_rules = 8,
@@ -262,7 +256,7 @@
 		REG_RULE(5745-10, 5825+10, 80, 6, 20,
 			NL80211_RRF_NO_IR),
 
-		/* IEEE 802.11ad (60gHz), channels 1..3 */
+		/* IEEE 802.11ad (60GHz), channels 1..3 */
 		REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
 	}
 };
@@ -279,6 +273,9 @@
 
 static void reg_free_request(struct regulatory_request *request)
 {
+	if (request == &core_request_world)
+		return;
+
 	if (request != get_last_request())
 		kfree(request);
 }
@@ -453,68 +450,70 @@
 }
 
 #ifdef CONFIG_CFG80211_INTERNAL_REGDB
-struct reg_regdb_search_request {
-	char alpha2[2];
+struct reg_regdb_apply_request {
 	struct list_head list;
+	const struct ieee80211_regdomain *regdom;
 };
 
-static LIST_HEAD(reg_regdb_search_list);
-static DEFINE_MUTEX(reg_regdb_search_mutex);
+static LIST_HEAD(reg_regdb_apply_list);
+static DEFINE_MUTEX(reg_regdb_apply_mutex);
 
-static void reg_regdb_search(struct work_struct *work)
+static void reg_regdb_apply(struct work_struct *work)
 {
-	struct reg_regdb_search_request *request;
-	const struct ieee80211_regdomain *curdom, *regdom = NULL;
-	int i;
+	struct reg_regdb_apply_request *request;
 
 	rtnl_lock();
 
-	mutex_lock(&reg_regdb_search_mutex);
-	while (!list_empty(&reg_regdb_search_list)) {
-		request = list_first_entry(&reg_regdb_search_list,
-					   struct reg_regdb_search_request,
+	mutex_lock(&reg_regdb_apply_mutex);
+	while (!list_empty(&reg_regdb_apply_list)) {
+		request = list_first_entry(&reg_regdb_apply_list,
+					   struct reg_regdb_apply_request,
 					   list);
 		list_del(&request->list);
 
-		for (i = 0; i < reg_regdb_size; i++) {
-			curdom = reg_regdb[i];
-
-			if (alpha2_equal(request->alpha2, curdom->alpha2)) {
-				regdom = reg_copy_regd(curdom);
-				break;
-			}
-		}
-
+		set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB);
 		kfree(request);
 	}
-	mutex_unlock(&reg_regdb_search_mutex);
-
-	if (!IS_ERR_OR_NULL(regdom))
-		set_regdom(regdom, REGD_SOURCE_INTERNAL_DB);
+	mutex_unlock(&reg_regdb_apply_mutex);
 
 	rtnl_unlock();
 }
 
-static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+static DECLARE_WORK(reg_regdb_work, reg_regdb_apply);
 
-static void reg_regdb_query(const char *alpha2)
+static int reg_query_builtin(const char *alpha2)
 {
-	struct reg_regdb_search_request *request;
+	const struct ieee80211_regdomain *regdom = NULL;
+	struct reg_regdb_apply_request *request;
+	unsigned int i;
 
-	if (!alpha2)
-		return;
+	for (i = 0; i < reg_regdb_size; i++) {
+		if (alpha2_equal(alpha2, reg_regdb[i]->alpha2)) {
+			regdom = reg_regdb[i];
+			break;
+		}
+	}
 
-	request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+	if (!regdom)
+		return -ENODATA;
+
+	request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL);
 	if (!request)
-		return;
+		return -ENOMEM;
 
-	memcpy(request->alpha2, alpha2, 2);
+	request->regdom = reg_copy_regd(regdom);
+	if (IS_ERR_OR_NULL(request->regdom)) {
+		kfree(request);
+		return -ENOMEM;
+	}
 
-	mutex_lock(&reg_regdb_search_mutex);
-	list_add_tail(&request->list, &reg_regdb_search_list);
-	mutex_unlock(&reg_regdb_search_mutex);
+	mutex_lock(&reg_regdb_apply_mutex);
+	list_add_tail(&request->list, &reg_regdb_apply_list);
+	mutex_unlock(&reg_regdb_apply_mutex);
 
 	schedule_work(&reg_regdb_work);
+
+	return 0;
 }
 
 /* Feel free to add any other sanity checks here */
@@ -525,9 +524,45 @@
 }
 #else
 static inline void reg_regdb_size_check(void) {}
-static inline void reg_regdb_query(const char *alpha2) {}
+static inline int reg_query_builtin(const char *alpha2)
+{
+	return -ENODATA;
+}
 #endif /* CONFIG_CFG80211_INTERNAL_REGDB */
 
+#ifdef CONFIG_CFG80211_CRDA_SUPPORT
+/* Max number of consecutive attempts to communicate with CRDA  */
+#define REG_MAX_CRDA_TIMEOUTS 10
+
+static u32 reg_crda_timeouts;
+
+static void crda_timeout_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
+
+static void crda_timeout_work(struct work_struct *work)
+{
+	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+	rtnl_lock();
+	reg_crda_timeouts++;
+	restore_regulatory_settings(true);
+	rtnl_unlock();
+}
+
+static void cancel_crda_timeout(void)
+{
+	cancel_delayed_work(&crda_timeout);
+}
+
+static void cancel_crda_timeout_sync(void)
+{
+	cancel_delayed_work_sync(&crda_timeout);
+}
+
+static void reset_crda_timeouts(void)
+{
+	reg_crda_timeouts = 0;
+}
+
 /*
 * This lets us keep regulatory code, which is updated on a regulatory
 * basis, in userspace.
@@ -536,13 +571,11 @@
 {
 	char country[12];
 	char *env[] = { country, NULL };
+	int ret;
 
 	snprintf(country, sizeof(country), "COUNTRY=%c%c",
 		 alpha2[0], alpha2[1]);
 
-	/* query internal regulatory database (if it exists) */
-	reg_regdb_query(alpha2);
-
 	if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
 		pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
 		return -EINVAL;
@@ -554,18 +587,34 @@
 	else
 		pr_debug("Calling CRDA to update world regulatory domain\n");
 
-	return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
-}
-
-static enum reg_request_treatment
-reg_call_crda(struct regulatory_request *request)
-{
-	if (call_crda(request->alpha2))
-		return REG_REQ_IGNORE;
+	ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
+	if (ret)
+		return ret;
 
 	queue_delayed_work(system_power_efficient_wq,
-			   &reg_timeout, msecs_to_jiffies(3142));
-	return REG_REQ_OK;
+			   &crda_timeout, msecs_to_jiffies(3142));
+	return 0;
+}
+#else
+static inline void cancel_crda_timeout(void) {}
+static inline void cancel_crda_timeout_sync(void) {}
+static inline void reset_crda_timeouts(void) {}
+static inline int call_crda(const char *alpha2)
+{
+	return -ENODATA;
+}
+#endif /* CONFIG_CFG80211_CRDA_SUPPORT */
+
+static bool reg_query_database(struct regulatory_request *request)
+{
+	/* query internal regulatory database (if it exists) */
+	if (reg_query_builtin(request->alpha2) == 0)
+		return true;
+
+	if (call_crda(request->alpha2) == 0)
+		return true;
+
+	return false;
 }
 
 bool reg_is_valid_request(const char *alpha2)
@@ -1040,8 +1089,8 @@
 	return ERR_PTR(-EINVAL);
 }
 
-const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
-						 u32 center_freq, u32 min_bw)
+static const struct ieee80211_reg_rule *
+__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
 {
 	const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
 	const struct ieee80211_reg_rule *reg_rule = NULL;
@@ -1081,11 +1130,11 @@
 }
 EXPORT_SYMBOL(reg_initiator_name);
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
 static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 				    struct ieee80211_channel *chan,
 				    const struct ieee80211_reg_rule *reg_rule)
 {
+#ifdef CONFIG_CFG80211_REG_DEBUG
 	const struct ieee80211_power_rule *power_rule;
 	const struct ieee80211_freq_range *freq_range;
 	char max_antenna_gain[32], bw[32];
@@ -1096,7 +1145,7 @@
 	if (!power_rule->max_antenna_gain)
 		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
 	else
-		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d",
+		snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
 			 power_rule->max_antenna_gain);
 
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
@@ -1110,19 +1159,12 @@
 	REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
 		      chan->center_freq);
 
-	REG_DBG_PRINT("%d KHz - %d KHz @ %s), (%s mBi, %d mBm)\n",
+	REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
 		      freq_range->start_freq_khz, freq_range->end_freq_khz,
 		      bw, max_antenna_gain,
 		      power_rule->max_eirp);
-}
-#else
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
-				    struct ieee80211_channel *chan,
-				    const struct ieee80211_reg_rule *reg_rule)
-{
-	return;
-}
 #endif
+}
 
 /*
  * Note that right now we assume the desired channel bandwidth
@@ -1311,7 +1353,8 @@
 	return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS);
 }
 #else
-static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+static enum reg_request_treatment
+reg_ignore_cell_hint(struct regulatory_request *pending_request)
 {
 	return REG_REQ_IGNORE;
 }
@@ -1846,7 +1889,7 @@
 		need_more_processing = true;
 	spin_unlock(&reg_requests_lock);
 
-	cancel_delayed_work(&reg_timeout);
+	cancel_crda_timeout();
 
 	if (need_more_processing)
 		schedule_work(&reg_work);
@@ -1858,19 +1901,18 @@
  *
  * The wireless subsystem can use this function to process
  * a regulatory request issued by the regulatory core.
- *
- * Returns one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_core(struct regulatory_request *core_request)
 {
+	if (reg_query_database(core_request)) {
+		core_request->intersect = false;
+		core_request->processed = false;
+		reg_update_last_request(core_request);
+		return REG_REQ_OK;
+	}
 
-	core_request->intersect = false;
-	core_request->processed = false;
-
-	reg_update_last_request(core_request);
-
-	return reg_call_crda(core_request);
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -1915,8 +1957,6 @@
  *
  * The wireless subsystem can use this function to process
  * a regulatory request initiated by userspace.
- *
- * Returns one of the different reg request treatment values.
  */
 static enum reg_request_treatment
 reg_process_hint_user(struct regulatory_request *user_request)
@@ -1925,20 +1965,20 @@
 
 	treatment = __reg_process_hint_user(user_request);
 	if (treatment == REG_REQ_IGNORE ||
-	    treatment == REG_REQ_ALREADY_SET) {
-		reg_free_request(user_request);
-		return treatment;
-	}
+	    treatment == REG_REQ_ALREADY_SET)
+		return REG_REQ_IGNORE;
 
 	user_request->intersect = treatment == REG_REQ_INTERSECT;
 	user_request->processed = false;
 
-	reg_update_last_request(user_request);
+	if (reg_query_database(user_request)) {
+		reg_update_last_request(user_request);
+		user_alpha2[0] = user_request->alpha2[0];
+		user_alpha2[1] = user_request->alpha2[1];
+		return REG_REQ_OK;
+	}
 
-	user_alpha2[0] = user_request->alpha2[0];
-	user_alpha2[1] = user_request->alpha2[1];
-
-	return reg_call_crda(user_request);
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -1986,16 +2026,12 @@
 	case REG_REQ_OK:
 		break;
 	case REG_REQ_IGNORE:
-		reg_free_request(driver_request);
-		return treatment;
+		return REG_REQ_IGNORE;
 	case REG_REQ_INTERSECT:
-		/* fall through */
 	case REG_REQ_ALREADY_SET:
 		regd = reg_copy_regd(get_cfg80211_regdom());
-		if (IS_ERR(regd)) {
-			reg_free_request(driver_request);
+		if (IS_ERR(regd))
 			return REG_REQ_IGNORE;
-		}
 
 		tmp = get_wiphy_regdom(wiphy);
 		rcu_assign_pointer(wiphy->regd, regd);
@@ -2006,8 +2042,6 @@
 	driver_request->intersect = treatment == REG_REQ_INTERSECT;
 	driver_request->processed = false;
 
-	reg_update_last_request(driver_request);
-
 	/*
 	 * Since CRDA will not be called in this case as we already
 	 * have applied the requested regulatory domain before we just
@@ -2015,11 +2049,17 @@
 	 */
 	if (treatment == REG_REQ_ALREADY_SET) {
 		nl80211_send_reg_change_event(driver_request);
+		reg_update_last_request(driver_request);
 		reg_set_request_processed();
-		return treatment;
+		return REG_REQ_ALREADY_SET;
 	}
 
-	return reg_call_crda(driver_request);
+	if (reg_query_database(driver_request)) {
+		reg_update_last_request(driver_request);
+		return REG_REQ_OK;
+	}
+
+	return REG_REQ_IGNORE;
 }
 
 static enum reg_request_treatment
@@ -2085,12 +2125,11 @@
 	case REG_REQ_OK:
 		break;
 	case REG_REQ_IGNORE:
-		/* fall through */
+		return REG_REQ_IGNORE;
 	case REG_REQ_ALREADY_SET:
 		reg_free_request(country_ie_request);
-		return treatment;
+		return REG_REQ_ALREADY_SET;
 	case REG_REQ_INTERSECT:
-		reg_free_request(country_ie_request);
 		/*
 		 * This doesn't happen yet, not sure we
 		 * ever want to support it for this case.
@@ -2102,9 +2141,12 @@
 	country_ie_request->intersect = false;
 	country_ie_request->processed = false;
 
-	reg_update_last_request(country_ie_request);
+	if (reg_query_database(country_ie_request)) {
+		reg_update_last_request(country_ie_request);
+		return REG_REQ_OK;
+	}
 
-	return reg_call_crda(country_ie_request);
+	return REG_REQ_IGNORE;
 }
 
 /* This processes *all* regulatory hints */
@@ -2118,11 +2160,11 @@
 
 	switch (reg_request->initiator) {
 	case NL80211_REGDOM_SET_BY_CORE:
-		reg_process_hint_core(reg_request);
-		return;
+		treatment = reg_process_hint_core(reg_request);
+		break;
 	case NL80211_REGDOM_SET_BY_USER:
-		reg_process_hint_user(reg_request);
-		return;
+		treatment = reg_process_hint_user(reg_request);
+		break;
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		if (!wiphy)
 			goto out_free;
@@ -2138,6 +2180,12 @@
 		goto out_free;
 	}
 
+	if (treatment == REG_REQ_IGNORE)
+		goto out_free;
+
+	WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET,
+	     "unexpected treatment value %d\n", treatment);
+
 	/* This is required so that the orig_* parameters are saved.
 	 * NOTE: treatment must be set for any case that reaches here!
 	 */
@@ -2345,7 +2393,7 @@
 	request->user_reg_hint_type = user_reg_hint_type;
 
 	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
 	queue_regulatory_request(request);
 
@@ -2417,7 +2465,7 @@
 	request->initiator = NL80211_REGDOM_SET_BY_DRIVER;
 
 	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
 	queue_regulatory_request(request);
 
@@ -2473,7 +2521,7 @@
 	request->country_ie_env = env;
 
 	/* Allow calling CRDA again */
-	reg_crda_timeouts = 0;
+	reset_crda_timeouts();
 
 	queue_regulatory_request(request);
 	request = NULL;
@@ -2874,11 +2922,8 @@
 	}
 
 	request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
-	if (!request_wiphy) {
-		queue_delayed_work(system_power_efficient_wq,
-				   &reg_timeout, 0);
+	if (!request_wiphy)
 		return -ENODEV;
-	}
 
 	if (!driver_request->intersect) {
 		if (request_wiphy->regd)
@@ -2935,11 +2980,8 @@
 	}
 
 	request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
-	if (!request_wiphy) {
-		queue_delayed_work(system_power_efficient_wq,
-				   &reg_timeout, 0);
+	if (!request_wiphy)
 		return -ENODEV;
-	}
 
 	if (country_ie_request->intersect)
 		return -EINVAL;
@@ -2966,7 +3008,7 @@
 	}
 
 	if (regd_src == REGD_SOURCE_CRDA)
-		reg_crda_timeouts = 0;
+		reset_crda_timeouts();
 
 	lr = get_last_request();
 
@@ -3123,15 +3165,6 @@
 	lr->country_ie_env = ENVIRON_ANY;
 }
 
-static void reg_timeout_work(struct work_struct *work)
-{
-	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
-	rtnl_lock();
-	reg_crda_timeouts++;
-	restore_regulatory_settings(true);
-	rtnl_unlock();
-}
-
 /*
  * See http://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii, for
  * UNII band definitions
@@ -3217,7 +3250,7 @@
 	struct reg_beacon *reg_beacon, *btmp;
 
 	cancel_work_sync(&reg_work);
-	cancel_delayed_work_sync(&reg_timeout);
+	cancel_crda_timeout_sync();
 	cancel_delayed_work_sync(&reg_check_chans);
 
 	/* Lock to suppress warnings */
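With this split, reg_query_builtin() consults the builtin table before CRDA is ever asked, and both probes are stubbed out when their Kconfig option is off, so reg_query_database() needs no #ifdefs at its call sites. A hypothetical entry of the shape the builtin lookup matches (values illustrative only; the REG_RULE arguments follow the world_regdom shown above):

static const struct ieee80211_regdomain regdom_xx = {
	.n_reg_rules = 1,
	.alpha2 = "XX",		/* illustrative country code */
	.reg_rules = {
		/* same argument order as in world_regdom above */
		REG_RULE(2402, 2472, 40, 6, 20, 0),
	},
};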
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3a50aa2..14d5369e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -266,8 +266,7 @@
 			spin_lock_bh(&rdev->bss_lock);
 			__cfg80211_bss_expire(rdev, request->scan_start);
 			spin_unlock_bh(&rdev->bss_lock);
-			request->scan_start =
-				jiffies + msecs_to_jiffies(request->interval);
+			request->scan_start = jiffies;
 		}
 		nl80211_send_sched_scan_results(rdev, request->dev);
 	}
@@ -839,6 +838,7 @@
 			found->pub.signal = tmp->pub.signal;
 		found->pub.capability = tmp->pub.capability;
 		found->ts = tmp->ts;
+		found->ts_boottime = tmp->ts_boottime;
 	} else {
 		struct cfg80211_internal_bss *new;
 		struct cfg80211_internal_bss *hidden;
@@ -938,14 +938,13 @@
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss*
-cfg80211_inform_bss_width(struct wiphy *wiphy,
-			  struct ieee80211_channel *rx_channel,
-			  enum nl80211_bss_scan_width scan_width,
-			  enum cfg80211_bss_frame_type ftype,
-			  const u8 *bssid, u64 tsf, u16 capability,
-			  u16 beacon_interval, const u8 *ie, size_t ielen,
-			  s32 signal, gfp_t gfp)
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+			 struct cfg80211_inform_bss *data,
+			 enum cfg80211_bss_frame_type ftype,
+			 const u8 *bssid, u64 tsf, u16 capability,
+			 u16 beacon_interval, const u8 *ie, size_t ielen,
+			 gfp_t gfp)
 {
 	struct cfg80211_bss_ies *ies;
 	struct ieee80211_channel *channel;
@@ -957,19 +956,21 @@
 		return NULL;
 
 	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-			(signal < 0 || signal > 100)))
+		    (data->signal < 0 || data->signal > 100)))
 		return NULL;
 
-	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, rx_channel);
+	channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
 	if (!channel)
 		return NULL;
 
 	memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
 	tmp.pub.channel = channel;
-	tmp.pub.scan_width = scan_width;
-	tmp.pub.signal = signal;
+	tmp.pub.scan_width = data->scan_width;
+	tmp.pub.signal = data->signal;
 	tmp.pub.beacon_interval = beacon_interval;
 	tmp.pub.capability = capability;
+	tmp.ts_boottime = data->boottime_ns;
+
 	/*
 	 * If we do not know here whether the IEs are from a Beacon or Probe
 	 * Response frame, we need to pick one of the options and only use it
@@ -999,7 +1000,7 @@
 	}
 	rcu_assign_pointer(tmp.pub.ies, ies);
 
-	signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
 		wiphy->max_adj_channel_rssi_comp;
 	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
 	if (!res)
@@ -1019,15 +1020,15 @@
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_width);
+EXPORT_SYMBOL(cfg80211_inform_bss_data);
 
-/* Returned bss is reference counted and must be cleaned up appropriately. */
+/* cfg80211_inform_bss_width_frame helper */
 struct cfg80211_bss *
-cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
-				struct ieee80211_channel *rx_channel,
-				enum nl80211_bss_scan_width scan_width,
-				struct ieee80211_mgmt *mgmt, size_t len,
-				s32 signal, gfp_t gfp)
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+			       struct cfg80211_inform_bss *data,
+			       struct ieee80211_mgmt *mgmt, size_t len,
+			       gfp_t gfp)
 {
 	struct cfg80211_internal_bss tmp = {}, *res;
 	struct cfg80211_bss_ies *ies;
@@ -1040,8 +1041,7 @@
 	BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
 			offsetof(struct ieee80211_mgmt, u.beacon.variable));
 
-	trace_cfg80211_inform_bss_width_frame(wiphy, rx_channel, scan_width, mgmt,
-					      len, signal);
+	trace_cfg80211_inform_bss_frame(wiphy, data, mgmt, len);
 
 	if (WARN_ON(!mgmt))
 		return NULL;
@@ -1050,14 +1050,14 @@
 		return NULL;
 
 	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
-		    (signal < 0 || signal > 100)))
+		    (data->signal < 0 || data->signal > 100)))
 		return NULL;
 
 	if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
 		return NULL;
 
 	channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-					   ielen, rx_channel);
+					   ielen, data->chan);
 	if (!channel)
 		return NULL;
 
@@ -1077,12 +1077,13 @@
 	
 	memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
 	tmp.pub.channel = channel;
-	tmp.pub.scan_width = scan_width;
-	tmp.pub.signal = signal;
+	tmp.pub.scan_width = data->scan_width;
+	tmp.pub.signal = data->signal;
 	tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
 	tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
+	tmp.ts_boottime = data->boottime_ns;
 
-	signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
+	signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
 		wiphy->max_adj_channel_rssi_comp;
 	res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid);
 	if (!res)
@@ -1102,7 +1103,7 @@
 	/* cfg80211_bss_update gives us a referenced result */
 	return &res->pub;
 }
-EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
 
 void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
 {
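For drivers, converting from the removed *_width entry points is mostly a matter of packaging the per-frame receive parameters. A minimal sketch, assuming the usual locals of a driver RX path (rx_channel, signal_mbm, mgmt, len) and assuming ktime_get_boot_ns() as the boottime source:

	struct cfg80211_inform_bss data = {
		.chan = rx_channel,
		.scan_width = NL80211_BSS_CHAN_WIDTH_20,
		.signal = signal_mbm,
		.boottime_ns = ktime_get_boot_ns(),	/* feeds ts_boottime */
	};
	struct cfg80211_bss *bss;

	bss = cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					     GFP_ATOMIC);
	if (bss)
		cfg80211_put_bss(wiphy, bss);	/* result is reference counted */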
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index a808279..0c392d3 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2670,30 +2670,30 @@
 		  __entry->privacy)
 );
 
-TRACE_EVENT(cfg80211_inform_bss_width_frame,
-	TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
-		 enum nl80211_bss_scan_width scan_width,
-		 struct ieee80211_mgmt *mgmt, size_t len,
-		 s32 signal),
-	TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
+TRACE_EVENT(cfg80211_inform_bss_frame,
+	TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data,
+		 struct ieee80211_mgmt *mgmt, size_t len),
+	TP_ARGS(wiphy, data, mgmt, len),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		CHAN_ENTRY
 		__field(enum nl80211_bss_scan_width, scan_width)
 		__dynamic_array(u8, mgmt, len)
 		__field(s32, signal)
+		__field(u64, ts_boottime)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
-		CHAN_ASSIGN(channel);
-		__entry->scan_width = scan_width;
+		CHAN_ASSIGN(data->chan);
+		__entry->scan_width = data->scan_width;
 		if (mgmt)
 			memcpy(__get_dynamic_array(mgmt), mgmt, len);
-		__entry->signal = signal;
+		__entry->signal = data->signal;
+		__entry->ts_boottime = data->boottime_ns;
 	),
-	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+	TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d, tsb:%llu",
 		  WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
-		  __entry->signal)
+		  __entry->signal, (unsigned long long)__entry->ts_boottime)
 );
 
 DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index c48a4b8..cc3676e 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -136,12 +136,12 @@
 	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
 		nf_reset(skb);
 
-		err = skb_dst(skb)->ops->local_out(skb);
+		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
 		if (unlikely(err != 1))
 			goto out;
 
 		if (!skb_dst(skb)->xfrm)
-			return dst_output(skb->sk, skb);
+			return dst_output(net, skb->sk, skb);
 
 		err = nf_hook(skb_dst(skb)->ops->family,
 			      NF_INET_POST_ROUTING, net, skb->sk, skb,
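These hunks track the tree-wide change that threads struct net through the output path. For reference, a sketch of the prototypes the converted call sites assume (see include/net/dst.h in this tree for the authoritative versions):

int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb);

/* in struct dst_ops: */
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);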
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 418daa0..09bfcba 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1887,6 +1887,7 @@
 	struct sock *sk;
 	struct dst_entry *dst;
 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
+	struct net *net = xp_net(pol);
 	struct xfrm_policy_queue *pq = &pol->polq;
 	struct flowi fl;
 	struct sk_buff_head list;
@@ -1903,8 +1904,7 @@
 	spin_unlock(&pq->hold_queue.lock);
 
 	dst_hold(dst->path);
-	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
-			  sk, 0);
+	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
 	if (IS_ERR(dst))
 		goto purge_queue;
 
@@ -1934,8 +1934,7 @@
 
 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
 		dst_hold(skb_dst(skb)->path);
-		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
-				  &fl, skb->sk, 0);
+		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
 		if (IS_ERR(dst)) {
 			kfree_skb(skb);
 			continue;
@@ -1945,7 +1944,7 @@
 		skb_dst_drop(skb);
 		skb_dst_set(skb, dst);
 
-		dst_output(skb->sk, skb);
+		dst_output(net, skb->sk, skb);
 	}
 
 out:
@@ -1958,7 +1957,7 @@
 	xfrm_pol_put(pol);
 }
 
-static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
+static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned long sched_next;
 	struct dst_entry *dst = skb_dst(skb);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 639e0d5..805681a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1931,8 +1931,10 @@
 	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
 	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
 	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
+	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
+	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
 
-	if (!lt && !rp && !re)
+	if (!lt && !rp && !re && !et && !rt)
 		return err;
 
 	/* pedantic mode - thou shalt sayeth replaceth */
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 63e7d50..b305145 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -13,6 +13,7 @@
 hostprogs-y += tracex4
 hostprogs-y += tracex5
 hostprogs-y += tracex6
+hostprogs-y += trace_output
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -27,6 +28,7 @@
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
 tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
+trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -40,6 +42,7 @@
 always += tracex4_kern.o
 always += tracex5_kern.o
 always += tracex6_kern.o
+always += trace_output_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -55,6 +58,7 @@
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
 HOSTLOADLIBES_tracex6 += -lelf
+HOSTLOADLIBES_trace_output += -lelf -lrt
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
@@ -64,3 +68,6 @@
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
 		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
+		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index 21aa1b4..b35c21e 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -37,6 +37,8 @@
 	(void *) BPF_FUNC_clone_redirect;
 static int (*bpf_redirect)(int ifindex, int flags) =
 	(void *) BPF_FUNC_redirect;
+static int (*bpf_perf_event_output)(void *ctx, void *map, int index, void *data, int size) =
+	(void *) BPF_FUNC_perf_event_output;
 
 /* llvm builtin functions that an eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/samples/bpf/libbpf.h b/samples/bpf/libbpf.h
index 7235e29..b7f63c70 100644
--- a/samples/bpf/libbpf.h
+++ b/samples/bpf/libbpf.h
@@ -64,6 +64,14 @@
 		.off   = 0,					\
 		.imm   = 0 })
 
+#define BPF_MOV32_REG(DST, SRC)					\
+	((struct bpf_insn) {					\
+		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
+		.dst_reg = DST,					\
+		.src_reg = SRC,					\
+		.off   = 0,					\
+		.imm   = 0 })
+
 /* Short form of mov, dst_reg = imm32 */
 
 #define BPF_MOV64_IMM(DST, IMM)					\
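A 32-bit ALU move truncates (and zero-extends) its source, so copying a pointer register through it leaks only the low half; the new "unpriv: partial copy of pointer" test below exercises exactly this. Side by side, the two macros in a test-style insns array:

	struct bpf_insn insns[] = {
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),	/* BPF_ALU64: full copy */
		BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),	/* BPF_ALU: low 32 bits
							 * only; "R10 partial
							 * copy" when loaded
							 * unprivileged */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};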
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index ee0f110..563c507 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -15,20 +15,27 @@
 #include <string.h>
 #include <linux/filter.h>
 #include <stddef.h>
+#include <stdbool.h>
+#include <sys/resource.h>
 #include "libbpf.h"
 
 #define MAX_INSNS 512
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
 
+#define MAX_FIXUPS 8
+
 struct bpf_test {
 	const char *descr;
 	struct bpf_insn	insns[MAX_INSNS];
-	int fixup[32];
+	int fixup[MAX_FIXUPS];
+	int prog_array_fixup[MAX_FIXUPS];
 	const char *errstr;
+	const char *errstr_unpriv;
 	enum {
+		UNDEF,
 		ACCEPT,
 		REJECT
-	} result;
+	} result, result_unpriv;
 	enum bpf_prog_type prog_type;
 };
 
@@ -96,6 +103,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid BPF_LD_IMM insn",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -109,6 +117,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid BPF_LD_IMM insn",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -219,6 +228,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "R0 !read_ok",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -294,7 +304,9 @@
 			BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R0 leaks addr",
 		.result = ACCEPT,
+		.result_unpriv = REJECT,
 	},
 	{
 		"check corrupted spill/fill",
@@ -310,6 +322,7 @@
 
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "attempt to corrupt spilled",
 		.errstr = "corrupted spill",
 		.result = REJECT,
 	},
@@ -473,6 +486,7 @@
 		},
 		.fixup = {3},
 		.errstr = "R0 invalid mem access",
+		.errstr_unpriv = "R0 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -495,6 +509,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -521,6 +537,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -555,6 +573,8 @@
 			BPF_EXIT_INSN(),
 		},
 		.fixup = {24},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -603,6 +623,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -642,6 +664,8 @@
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -699,6 +723,7 @@
 		},
 		.fixup = {4},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -720,6 +745,7 @@
 		},
 		.fixup = {6},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -742,6 +768,7 @@
 		},
 		.fixup = {7},
 		.errstr = "different pointers",
+		.errstr_unpriv = "R1 pointer comparison",
 		.result = REJECT,
 	},
 	{
@@ -752,6 +779,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -762,6 +790,7 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
@@ -772,16 +801,18 @@
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "R1 leaks addr",
 		.result = REJECT,
 	},
 	{
 		"check out of range skb->cb access",
 		.insns = {
 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-				    offsetof(struct __sk_buff, cb[60])),
+				    offsetof(struct __sk_buff, cb[0]) + 256),
 			BPF_EXIT_INSN(),
 		},
 		.errstr = "invalid bpf_context access",
+		.errstr_unpriv = "",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
 	},
@@ -803,6 +834,8 @@
 			BPF_EXIT_INSN(),
 		},
 		.result = ACCEPT,
+		.errstr_unpriv = "R1 leaks addr",
+		.result_unpriv = REJECT,
 	},
 	{
 		"write skb fields from tc_cls_act prog",
@@ -819,6 +852,8 @@
 				    offsetof(struct __sk_buff, cb[3])),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -881,6 +916,270 @@
 		.result = REJECT,
 		.errstr = "invalid stack off=0 size=8",
 	},
+	{
+		"unpriv: return pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr",
+	},
+	{
+		"unpriv: add const to pointer",
+		.insns = {
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: add pointer to pointer",
+		.insns = {
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: neg pointer",
+		.insns = {
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer arithmetic",
+	},
+	{
+		"unpriv: cmp pointer with const",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R1 pointer comparison",
+	},
+	{
+		"unpriv: cmp pointer with pointer",
+		.insns = {
+			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R10 pointer comparison",
+	},
+	{
+		"unpriv: check that printk is disallowed",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_MOV64_IMM(BPF_REG_2, 8),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "unknown func 6",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: pass pointer to helper function",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr_unpriv = "R4 leaks addr",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: indirectly pass pointer on stack to helper function",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+	},
+	{
+		"unpriv: mangle pointer on stack 1",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "attempt to corrupt spilled",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: mangle pointer on stack 2",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "attempt to corrupt spilled",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: read pointer from stack in small chunks",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "invalid size",
+		.result = REJECT,
+	},
+	{
+		"unpriv: write pointer into ctx",
+		.insns = {
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 leaks addr",
+		.result_unpriv = REJECT,
+		.errstr = "invalid bpf_context access",
+		.result = REJECT,
+	},
+	{
+		"unpriv: write pointer into map elem value",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {3},
+		.errstr_unpriv = "R0 leaks addr",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: partial copy of pointer",
+		.insns = {
+			BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 partial copy",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: pass pointer to tail_call",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+			BPF_LD_MAP_FD(BPF_REG_2, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_array_fixup = {1},
+		.errstr_unpriv = "R3 leaks addr into helper",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: cmp map pointer with zero",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup = {1},
+		.errstr_unpriv = "R1 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: write into frame pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "frame pointer is read only",
+		.result = REJECT,
+	},
+	{
+		"unpriv: cmp of frame pointer",
+		.insns = {
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R10 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: cmp of stack pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R2 pointer comparison",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: obfuscate stack pointer",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R2 pointer arithmetic",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -896,13 +1195,24 @@
 
 static int create_map(void)
 {
-	long long key, value = 0;
 	int map_fd;
 
-	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), 1024);
-	if (map_fd < 0) {
+	map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
+				sizeof(long long), sizeof(long long), 1024);
+	if (map_fd < 0)
 		printf("failed to create map '%s'\n", strerror(errno));
-	}
+
+	return map_fd;
+}
+
+static int create_prog_array(void)
+{
+	int map_fd;
+
+	map_fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY,
+				sizeof(int), sizeof(int), 4);
+	if (map_fd < 0)
+		printf("failed to create prog_array '%s'\n", strerror(errno));
 
 	return map_fd;
 }
@@ -910,13 +1220,17 @@
 static int test(void)
 {
 	int prog_fd, i, pass_cnt = 0, err_cnt = 0;
+	bool unpriv = geteuid() != 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		struct bpf_insn *prog = tests[i].insns;
 		int prog_type = tests[i].prog_type;
 		int prog_len = probe_filter_length(prog);
 		int *fixup = tests[i].fixup;
-		int map_fd = -1;
+		int *prog_array_fixup = tests[i].prog_array_fixup;
+		int expected_result;
+		const char *expected_errstr;
+		int map_fd = -1, prog_array_fd = -1;
 
 		if (*fixup) {
 			map_fd = create_map();
@@ -926,13 +1240,31 @@
 				fixup++;
 			} while (*fixup);
 		}
+		if (*prog_array_fixup) {
+			prog_array_fd = create_prog_array();
+
+			do {
+				prog[*prog_array_fixup].imm = prog_array_fd;
+				prog_array_fixup++;
+			} while (*prog_array_fixup);
+		}
 		printf("#%d %s ", i, tests[i].descr);
 
 		prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
 					prog, prog_len * sizeof(struct bpf_insn),
 					"GPL", 0);
 
-		if (tests[i].result == ACCEPT) {
+		if (unpriv && tests[i].result_unpriv != UNDEF)
+			expected_result = tests[i].result_unpriv;
+		else
+			expected_result = tests[i].result;
+
+		if (unpriv && tests[i].errstr_unpriv)
+			expected_errstr = tests[i].errstr_unpriv;
+		else
+			expected_errstr = tests[i].errstr;
+
+		if (expected_result == ACCEPT) {
 			if (prog_fd < 0) {
 				printf("FAIL\nfailed to load prog '%s'\n",
 				       strerror(errno));
@@ -947,7 +1279,7 @@
 				err_cnt++;
 				goto fail;
 			}
-			if (strstr(bpf_log_buf, tests[i].errstr) == 0) {
+			if (strstr(bpf_log_buf, expected_errstr) == 0) {
 				printf("FAIL\nunexpected error message: %s",
 				       bpf_log_buf);
 				err_cnt++;
@@ -960,6 +1292,8 @@
 fail:
 		if (map_fd >= 0)
 			close(map_fd);
+		if (prog_array_fd >= 0)
+			close(prog_array_fd);
 		close(prog_fd);
 
 	}
@@ -970,5 +1304,8 @@
 
 int main(void)
 {
+	struct rlimit r = {1 << 20, 1 << 20};
+
+	setrlimit(RLIMIT_MEMLOCK, &r);
 	return test();
 }
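Raising RLIMIT_MEMLOCK matters because map and program allocations are charged against the caller's locked-memory limit, and the default is easily exhausted once the suite creates a map per test. A slightly more defensive variant of the same setup (sketch; the sample deliberately ignores the return value):

	struct rlimit r = { 1 << 20, 1 << 20 };	/* 1 MiB of lockable memory */

	if (setrlimit(RLIMIT_MEMLOCK, &r))
		perror("setrlimit(RLIMIT_MEMLOCK)");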
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
new file mode 100644
index 0000000..8d8d1ec
--- /dev/null
+++ b/samples/bpf/trace_output_kern.c
@@ -0,0 +1,31 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = 2,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	struct S {
+		u64 pid;
+		u64 cookie;
+	} data;
+
+	memset(&data, 0, sizeof(data));
+	data.pid = bpf_get_current_pid_tgid();
+	data.cookie = 0x12345678;
+
+	bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
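The third argument of bpf_perf_event_output() selects a slot in the PERF_EVENT_ARRAY map, and that slot must already hold a perf event fd installed from user space; the loader below pairs with index 0 like so (excerpted from test_bpf_perf_event() further down):

	int key = 0;	/* must match the index the kernel program outputs to */

	assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);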
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
new file mode 100644
index 0000000..661a7d0
--- /dev/null
+++ b/samples/bpf/trace_output_user.c
@@ -0,0 +1,196 @@
+/* This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <signal.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+static int pmu_fd;
+
+int page_size;
+int page_cnt = 8;
+volatile struct perf_event_mmap_page *header;
+
+typedef void (*print_fn)(void *data, int size);
+
+static int perf_event_mmap(int fd)
+{
+	void *base;
+	int mmap_size;
+
+	page_size = getpagesize();
+	mmap_size = page_size * (page_cnt + 1);
+
+	base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if (base == MAP_FAILED) {
+		printf("mmap err\n");
+		return -1;
+	}
+
+	header = base;
+	return 0;
+}
+
+static int perf_event_poll(int fd)
+{
+	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+
+	return poll(&pfd, 1, 1000);
+}
+
+struct perf_event_sample {
+	struct perf_event_header header;
+	__u32 size;
+	char data[];
+};
+
+void perf_event_read(print_fn fn)
+{
+	__u64 data_tail = header->data_tail;
+	__u64 data_head = header->data_head;
+	__u64 buffer_size = page_cnt * page_size;
+	void *base, *begin, *end;
+	char buf[256];
+
+	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
+	if (data_head == data_tail)
+		return;
+
+	base = ((char *)header) + page_size;
+
+	begin = base + data_tail % buffer_size;
+	end = base + data_head % buffer_size;
+
+	while (begin != end) {
+		struct perf_event_sample *e;
+
+		e = begin;
+		if (begin + e->header.size > base + buffer_size) {
+			long len = base + buffer_size - begin;
+
+			assert(len < e->header.size);
+			memcpy(buf, begin, len);
+			memcpy(buf + len, base, e->header.size - len);
+			e = (void *) buf;
+			begin = base + e->header.size - len;
+		} else if (begin + e->header.size == base + buffer_size) {
+			begin = base;
+		} else {
+			begin += e->header.size;
+		}
+
+		if (e->header.type == PERF_RECORD_SAMPLE) {
+			fn(e->data, e->size);
+		} else if (e->header.type == PERF_RECORD_LOST) {
+			struct {
+				struct perf_event_header header;
+				__u64 id;
+				__u64 lost;
+			} *lost = (void *) e;
+			printf("lost %lld events\n", lost->lost);
+		} else {
+			printf("unknown event type=%d size=%d\n",
+			       e->header.type, e->header.size);
+		}
+	}
+
+	__sync_synchronize(); /* smp_mb() */
+	header->data_tail = data_head;
+}
+
+static __u64 time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+static __u64 start_time;
+
+#define MAX_CNT 100000ll
+
+static void print_bpf_output(void *data, int size)
+{
+	static __u64 cnt;
+	struct {
+		__u64 pid;
+		__u64 cookie;
+	} *e = data;
+
+	if (e->cookie != 0x12345678) {
+		printf("BUG pid %llx cookie %llx sized %d\n",
+		       e->pid, e->cookie, size);
+		kill(0, SIGINT);
+	}
+
+	cnt++;
+
+	if (cnt == MAX_CNT) {
+		printf("recv %lld events per sec\n",
+		       MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+		kill(0, SIGINT);
+	}
+}
+
+static void test_bpf_perf_event(void)
+{
+	struct perf_event_attr attr = {
+		.sample_type = PERF_SAMPLE_RAW,
+		.type = PERF_TYPE_SOFTWARE,
+		.config = PERF_COUNT_SW_BPF_OUTPUT,
+	};
+	int key = 0;
+
+	pmu_fd = perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
+
+	assert(pmu_fd >= 0);
+	assert(bpf_update_elem(map_fd[0], &key, &pmu_fd, BPF_ANY) == 0);
+	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+	FILE *f;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	test_bpf_perf_event();
+
+	if (perf_event_mmap(pmu_fd) < 0)
+		return 1;
+
+	f = popen("taskset 1 dd if=/dev/zero of=/dev/null", "r");
+	(void) f;
+
+	start_time = time_get_ns();
+	for (;;) {
+		perf_event_poll(pmu_fd);
+		perf_event_read(print_bpf_output);
+	}
+
+	return 0;
+}
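perf_event_read() above follows the mmap ring's consumer protocol: read data_head, read barrier, consume everything in [data_tail, data_head), full barrier, then publish the new tail. Condensed to its essentials (sketch only; as the inline comment notes, the compiler barrier stands in for smp_rmb() on this architecture):

	__u64 head = header->data_head;

	asm volatile("" ::: "memory");	/* pairs with the producer's barrier */
	/* ... consume records in [header->data_tail, head) ... */
	__sync_synchronize();	/* smp_mb(): finish reads before freeing space */
	header->data_tail = head;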
diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c
index 9119ac6..c285a3b 100644
--- a/samples/kprobes/jprobe_example.c
+++ b/samples/kprobes/jprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of do_fork().
+ * the arguments of _do_fork().
  *
  * For more information on theory of operation of jprobes, see
  * Documentation/kprobes.txt
  *
  * Build and insert the kernel module as done in the kprobe example.
  * You will see the trace data in /var/log/messages and on the
- * console whenever do_fork() is invoked to create a new process.
+ * console whenever _do_fork() is invoked to create a new process.
  * (Some messages may be suppressed if syslogd is configured to
  * eliminate duplicate messages.)
  */
@@ -17,13 +17,13 @@
 #include <linux/kprobes.h>
 
 /*
- * Jumper probe for do_fork.
+ * Jumper probe for _do_fork.
  * Mirror principle enables access to arguments of the probed routine
  * from the probe handler.
  */
 
-/* Proxy routine having the same arguments as actual do_fork() routine */
-static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+/* Proxy routine having the same arguments as actual _do_fork() routine */
+static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
 	      unsigned long stack_size, int __user *parent_tidptr,
 	      int __user *child_tidptr)
 {
@@ -36,9 +36,9 @@
 }
 
 static struct jprobe my_jprobe = {
-	.entry			= jdo_fork,
+	.entry			= j_do_fork,
 	.kp = {
-		.symbol_name	= "do_fork",
+		.symbol_name	= "_do_fork",
 	},
 };
 
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c
index 366db1a..727eb21 100644
--- a/samples/kprobes/kprobe_example.c
+++ b/samples/kprobes/kprobe_example.c
@@ -1,13 +1,13 @@
 /*
  * NOTE: This example works on x86 and powerpc.
  * Here's a sample kernel module showing the use of kprobes to dump a
- * stack trace and selected registers when do_fork() is called.
+ * stack trace and selected registers when _do_fork() is called.
  *
  * For more information on theory of operation of kprobes, see
  * Documentation/kprobes.txt
  *
  * You will see the trace data in /var/log/messages and on the console
- * whenever do_fork() is invoked to create a new process.
+ * whenever _do_fork() is invoked to create a new process.
  */
 
 #include <linux/kernel.h>
@@ -16,7 +16,7 @@
 
 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
-	.symbol_name	= "do_fork",
+	.symbol_name	= "_do_fork",
 };
 
 /* kprobe pre_handler: called just before the probed instruction is executed */
diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c
index 1041b67..ebb1d1a 100644
--- a/samples/kprobes/kretprobe_example.c
+++ b/samples/kprobes/kretprobe_example.c
@@ -7,7 +7,7 @@
  *
  * usage: insmod kretprobe_example.ko func=<func_name>
  *
- * If no func_name is specified, do_fork is instrumented
+ * If no func_name is specified, _do_fork is instrumented
  *
  * For more information on theory of operation of kretprobes, see
  * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
 #include <linux/limits.h>
 #include <linux/sched.h>
 
-static char func_name[NAME_MAX] = "do_fork";
+static char func_name[NAME_MAX] = "_do_fork";
 module_param_string(func, func_name, NAME_MAX, S_IRUGO);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
 			" function's execution time");
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index 6ce5945..b071bf4 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -17,13 +17,9 @@
 #include <stdint.h>
 #include <stdbool.h>
 #include <string.h>
-#include <getopt.h>
 #include <err.h>
-#include <arpa/inet.h>
 #include <openssl/bio.h>
-#include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/pkcs7.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 0cd46e1..b967e4f 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -115,7 +115,7 @@
 BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)"
 
 # Setup the directory structure
-rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir"
+rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files
 mkdir -m 755 -p "$tmpdir/DEBIAN"
 mkdir -p "$tmpdir/lib" "$tmpdir/boot"
 mkdir -p "$fwdir/lib/firmware/$version/"
@@ -408,7 +408,7 @@
 	\$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg
 
 clean:
-	rm -rf debian/*tmp
+	rm -rf debian/*tmp debian/files
 	mv debian/ debian.backup # debian/ might be cleaned away
 	\$(MAKE) clean
 	mv debian.backup debian
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index c3899ca..250a7a6 100755
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -20,13 +20,34 @@
 #include <getopt.h>
 #include <err.h>
 #include <arpa/inet.h>
+#include <openssl/opensslv.h>
 #include <openssl/bio.h>
 #include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/cms.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
+ * assume that it's not available, that its header file is missing, and that
+ * we should use PKCS#7 instead.  Switching to the older PKCS#7 format
+ * restricts the options we have for specifying the X.509 certificate we want.
+ *
+ * Further, older versions of OpenSSL don't support manually adding signers to
+ * the PKCS#7 message, so we have to accept that a certificate gets included
+ * in the signature message.  Nor do such older versions of OpenSSL support
+ * signing with anything other than SHA1 - so we're stuck with SHA1 in that
+ * case.
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10000000L
+#define USE_PKCS7
+#endif
+#ifndef USE_PKCS7
+#include <openssl/cms.h>
+#else
+#include <openssl/pkcs7.h>
+#endif
+
 struct module_signature {
 	uint8_t		algo;		/* Public-key crypto algorithm [0] */
 	uint8_t		hash;		/* Digest algorithm [0] */
@@ -110,30 +131,42 @@
 	struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
 	char *hash_algo = NULL;
 	char *private_key_name, *x509_name, *module_name, *dest_name;
-	bool save_cms = false, replace_orig;
+	bool save_sig = false, replace_orig;
 	bool sign_only = false;
 	unsigned char buf[4096];
-	unsigned long module_size, cms_size;
-	unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR;
+	unsigned long module_size, sig_size;
+	unsigned int use_signed_attrs;
 	const EVP_MD *digest_algo;
 	EVP_PKEY *private_key;
+#ifndef USE_PKCS7
 	CMS_ContentInfo *cms;
+	unsigned int use_keyid = 0;
+#else
+	PKCS7 *pkcs7;
+#endif
 	X509 *x509;
 	BIO *b, *bd = NULL, *bm;
 	int opt, n;
-
 	OpenSSL_add_all_algorithms();
 	ERR_load_crypto_strings();
 	ERR_clear_error();
 
 	key_pass = getenv("KBUILD_SIGN_PIN");
 
+#ifndef USE_PKCS7
+	use_signed_attrs = CMS_NOATTR;
+#else
+	use_signed_attrs = PKCS7_NOATTR;
+#endif
+
 	do {
 		opt = getopt(argc, argv, "dpk");
 		switch (opt) {
-		case 'p': save_cms = true; break;
-		case 'd': sign_only = true; save_cms = true; break;
+		case 'p': save_sig = true; break;
+		case 'd': sign_only = true; save_sig = true; break;
+#ifndef USE_PKCS7
 		case 'k': use_keyid = CMS_USE_KEYID; break;
+#endif
 		case -1: break;
 		default: format();
 		}
@@ -157,6 +190,14 @@
 		replace_orig = true;
 	}
 
+#ifdef USE_PKCS7
+	if (strcmp(hash_algo, "sha1") != 0) {
+		fprintf(stderr, "sign-file: %s only supports SHA1 signing\n",
+			OPENSSL_VERSION_TEXT);
+		exit(3);
+	}
+#endif
+
 	/* Read the private key and the X.509 cert the PKCS#7 message
 	 * will point to.
 	 */
@@ -213,7 +254,8 @@
 	bm = BIO_new_file(module_name, "rb");
 	ERR(!bm, "%s", module_name);
 
-	/* Load the CMS message from the digest buffer. */
+#ifndef USE_PKCS7
+	/* Load the signature message from the digest buffer. */
 	cms = CMS_sign(NULL, NULL, NULL, NULL,
 		       CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
 	ERR(!cms, "CMS_sign");
@@ -221,17 +263,31 @@
 	ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
 			     CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
 			     use_keyid | use_signed_attrs),
-	    "CMS_sign_add_signer");
+	    "CMS_add1_signer");
 	ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
 	    "CMS_final");
 
-	if (save_cms) {
-		char *cms_name;
+#else
+	pkcs7 = PKCS7_sign(x509, private_key, NULL, bm,
+			   PKCS7_NOCERTS | PKCS7_BINARY |
+			   PKCS7_DETACHED | use_signed_attrs);
+	ERR(!pkcs7, "PKCS7_sign");
+#endif
 
-		ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf");
-		b = BIO_new_file(cms_name, "wb");
-		ERR(!b, "%s", cms_name);
-		ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name);
+	if (save_sig) {
+		char *sig_file_name;
+
+		ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0,
+		    "asprintf");
+		b = BIO_new_file(sig_file_name, "wb");
+		ERR(!b, "%s", sig_file_name);
+#ifndef USE_PKCS7
+		ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+		    "%s", sig_file_name);
+#else
+		ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+			"%s", sig_file_name);
+#endif
 		BIO_free(b);
 	}
 
@@ -247,9 +303,13 @@
 	ERR(n < 0, "%s", module_name);
 	module_size = BIO_number_written(bd);
 
+#ifndef USE_PKCS7
 	ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
-	cms_size = BIO_number_written(bd) - module_size;
-	sig_info.sig_len = htonl(cms_size);
+#else
+	ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+#endif
+	sig_size = BIO_number_written(bd) - module_size;
+	sig_info.sig_len = htonl(sig_size);
 	ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
 	ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
 
diff --git a/security/keys/gc.c b/security/keys/gc.c
index c795237..39eac1f 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -134,6 +134,10 @@
 		kdebug("- %u", key->serial);
 		key_check(key);
 
+		/* Throw away the key data */
+		if (key->type->destroy)
+			key->type->destroy(key);
+
 		security_key_free(key);
 
 		/* deal with the user's key tracking and quota */
@@ -148,10 +152,6 @@
 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
 			atomic_dec(&key->user->nikeys);
 
-		/* now throw away the key memory */
-		if (key->type->destroy)
-			key->type->destroy(key);
-
 		key_user_put(key->user);
 
 		kfree(key->description);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6434016..26f4039 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4898,7 +4898,7 @@
 	if (sk) {
 		struct sk_security_struct *sksec;
 
-		if (sk->sk_state == TCP_LISTEN)
+		if (sk_listener(sk))
 			/* if the socket is in the listening state then this
 			 * packet is a SYN-ACK packet which means it needs to
 			 * be labeled based on the connection/request_sock and
@@ -5005,7 +5005,7 @@
 	 *       unfortunately, this means more work, but it is only once per
 	 *       connection. */
 	if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
-	    !(sk != NULL && sk->sk_state == TCP_LISTEN))
+	    !(sk && sk_listener(sk)))
 		return NF_ACCEPT;
 #endif
 
@@ -5022,7 +5022,7 @@
 			secmark_perm = PACKET__SEND;
 			peer_sid = SECINITSID_KERNEL;
 		}
-	} else if (sk->sk_state == TCP_LISTEN) {
+	} else if (sk_listener(sk)) {
 		/* Locally generated packet but the associated socket is in the
 		 * listening state which means this is a SYN-ACK packet.  In
 		 * this particular case the correct security label is assigned
@@ -5033,7 +5033,11 @@
 		 * selinux_inet_conn_request().  See also selinux_ip_output()
 		 * for similar problems. */
 		u32 skb_sid;
-		struct sk_security_struct *sksec = sk->sk_security;
+		struct sk_security_struct *sksec;
+
+		if (sk->sk_state == TCP_NEW_SYN_RECV)
+			sk = inet_reqsk(sk)->rsk_listener;
+		sksec = sk->sk_security;
 		if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
 			return NF_DROP;
 		/* At this point, if the returned skb peerlbl is SECSID_NULL
@@ -6127,21 +6131,18 @@
 static struct nf_hook_ops selinux_nf_ops[] = {
 	{
 		.hook =		selinux_ipv4_postroute,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV4,
 		.hooknum =	NF_INET_POST_ROUTING,
 		.priority =	NF_IP_PRI_SELINUX_LAST,
 	},
 	{
 		.hook =		selinux_ipv4_forward,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV4,
 		.hooknum =	NF_INET_FORWARD,
 		.priority =	NF_IP_PRI_SELINUX_FIRST,
 	},
 	{
 		.hook =		selinux_ipv4_output,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV4,
 		.hooknum =	NF_INET_LOCAL_OUT,
 		.priority =	NF_IP_PRI_SELINUX_FIRST,
@@ -6149,14 +6150,12 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	{
 		.hook =		selinux_ipv6_postroute,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV6,
 		.hooknum =	NF_INET_POST_ROUTING,
 		.priority =	NF_IP6_PRI_SELINUX_LAST,
 	},
 	{
 		.hook =		selinux_ipv6_forward,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV6,
 		.hooknum =	NF_INET_FORWARD,
 		.priority =	NF_IP6_PRI_SELINUX_FIRST,
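
The sk_listener() helper used in the hunks above folds the new
TCP_NEW_SYN_RECV mini-socket state into the listener test, so SYN-ACKs
generated from request sockets take the same path as plain listeners.
Its definition at this point (in include/net/sock.h) is essentially:

static inline bool sk_listener(const struct sock *sk)
{
	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
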
diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c
index a9e41da..6d1706c 100644
--- a/security/smack/smack_netfilter.c
+++ b/security/smack/smack_netfilter.c
@@ -57,7 +57,6 @@
 static struct nf_hook_ops smack_nf_ops[] = {
 	{
 		.hook =		smack_ipv4_output,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV4,
 		.hooknum =	NF_INET_LOCAL_OUT,
 		.priority =	NF_IP_PRI_SELINUX_FIRST,
@@ -65,7 +64,6 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	{
 		.hook =		smack_ipv6_output,
-		.owner =	THIS_MODULE,
 		.pf =		NFPROTO_IPV6,
 		.hooknum =	NF_INET_LOCAL_OUT,
 		.priority =	NF_IP6_PRI_SELINUX_FIRST,
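
The .owner lines can go because struct nf_hook_ops no longer carries an
owner field; hook users are expected to unregister in their exit paths
rather than pin THIS_MODULE. A registration entry then reduces to the
following sketch (example_out is a made-up hook, using the nf_hookfn
prototype of this merge window):

static unsigned int example_out(void *priv, struct sk_buff *skb,
				const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops example_nf_ops[] = {
	{
		.hook		= example_out,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_FIRST,
	},
};
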
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 584a034..85813de 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -633,6 +633,7 @@
 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
 	{} /* terminator */
 };
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index afec6dc..16b8dcb 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5306,6 +5306,7 @@
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9d947ae..def5cc8 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4520,7 +4520,11 @@
 		return err;
 
 	spec = codec->spec;
-	codec->power_save_node = 1;
+	/* enable power_save_node only for new 92HD89xx chips, as it causes
+	 * click noises on old 92HD73xx chips.
+	 */
+	if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
+		codec->power_save_node = 1;
 	spec->linear_tone_beep = 0;
 	spec->gen.mixer_nid = 0x1d;
 	spec->have_spdif_mux = 1;
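
The vendor-ID mask treats the whole 0x111d767x range as the old
92HD73xx family. A quick standalone check (the two IDs are merely
illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int ids[] = { 0x111d7675, 0x111d76e0 };

	for (int i = 0; i < 2; i++)
		printf("%#x -> power_save_node %s\n", ids[i],
		       (ids[i] & 0xfffffff0) != 0x111d7670 ? "on" : "off");
	return 0;
}
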
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index 58c3164..8c907eb 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -129,6 +129,8 @@
 	.cpu_dai_name	= "au1xpsc_i2s.2",
 	.platform_name	= "au1xpsc-pcm.2",
 	.codec_name	= "wm8731.0-001b",
+	.dai_fmt	= SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+			  SND_SOC_DAIFMT_CBM_CFM,
 	.ops		= &db1200_i2s_wm8731_ops,
 };
 
@@ -146,6 +148,8 @@
 	.cpu_dai_name	= "au1xpsc_i2s.3",
 	.platform_name	= "au1xpsc-pcm.3",
 	.codec_name	= "wm8731.0-001b",
+	.dai_fmt	= SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
+			  SND_SOC_DAIFMT_CBM_CFM,
 	.ops		= &db1200_i2s_wm8731_ops,
 };
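
The dai_fmt value added to both links decodes, per the flag names in
include/sound/soc-dai.h, as:

	.dai_fmt = SND_SOC_DAIFMT_LEFT_J	/* left-justified data        */
		 | SND_SOC_DAIFMT_NB_NF		/* normal BCLK/frame polarity */
		 | SND_SOC_DAIFMT_CBM_CFM,	/* codec is bit+frame master  */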
 
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 268a28b..5c101af 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -519,11 +519,11 @@
 		RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
 
 	/* ADC Boost Volume Control */
-	SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1,
+	SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1,
 		RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0,
 		adc_bst_tlv),
-	SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1,
-		RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0,
+	SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2,
+		RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0,
 		adc_bst_tlv),
 
 	/* I2S2 function select */
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0e4cfc6..8c964cfb 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -39,8 +39,8 @@
 #define RT5645_STO1_ADC_DIG_VOL			0x1c
 #define RT5645_MONO_ADC_DIG_VOL			0x1d
 #define RT5645_ADC_BST_VOL1			0x1e
-/* Mixer - D-D */
 #define RT5645_ADC_BST_VOL2			0x20
+/* Mixer - D-D */
 #define RT5645_STO1_ADC_MIXER			0x27
 #define RT5645_MONO_ADC_MIXER			0x28
 #define RT5645_AD_DA_MIXER			0x29
@@ -315,12 +315,14 @@
 #define RT5645_STO1_ADC_R_BST_SFT		12
 #define RT5645_STO1_ADC_COMP_MASK		(0x3 << 10)
 #define RT5645_STO1_ADC_COMP_SFT		10
-#define RT5645_STO2_ADC_L_BST_MASK		(0x3 << 8)
-#define RT5645_STO2_ADC_L_BST_SFT		8
-#define RT5645_STO2_ADC_R_BST_MASK		(0x3 << 6)
-#define RT5645_STO2_ADC_R_BST_SFT		6
-#define RT5645_STO2_ADC_COMP_MASK		(0x3 << 4)
-#define RT5645_STO2_ADC_COMP_SFT		4
+
+/* ADC Boost Volume Control (0x20) */
+#define RT5645_MONO_ADC_L_BST_MASK		(0x3 << 14)
+#define RT5645_MONO_ADC_L_BST_SFT		14
+#define RT5645_MONO_ADC_R_BST_MASK		(0x3 << 12)
+#define RT5645_MONO_ADC_R_BST_SFT		12
+#define RT5645_MONO_ADC_COMP_MASK		(0x3 << 10)
+#define RT5645_MONO_ADC_COMP_SFT		10
 
 /* Stereo2 ADC Mixer Control (0x26) */
 #define RT5645_STO2_ADC_SRC_MASK		(0x1 << 15)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index bfda25e..f540f82 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1376,8 +1376,8 @@
 			sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
 
 	snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
-			SGTL5000_BIAS_R_MASK,
-			sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
+			SGTL5000_BIAS_VOLT_MASK,
+			sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
 	/*
 	 * disable DAP
 	 * TODO:
@@ -1549,7 +1549,7 @@
 			else {
 				sgtl5000->micbias_voltage = 0;
 				dev_err(&client->dev,
-					"Unsuitable MicBias resistor\n");
+					"Unsuitable MicBias voltage\n");
 			}
 		} else {
 			sgtl5000->micbias_voltage = 0;
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index e3a0bca..cc1d398 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -549,7 +549,7 @@
 /*
  * DAC digital volumes. From -7 to 24 dB in 1 dB steps
  */
-static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0);
+static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
 
 static const char * const tas2552_din_source_select[] = {
 	"Muted",
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 1a82b19..8739126 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1509,14 +1509,17 @@
 	snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL);
 	snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL);
 
-	/* Line2 to HP Bypass default volume, disconnect from Output Mixer */
-	snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
-	snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
-	snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
-	snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
-	/* Line2 Line Out default volume, disconnect from Output Mixer */
-	snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
-	snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+	/* On tlv320aic3104, these registers are reserved and must not be written */
+	if (aic3x->model != AIC3X_MODEL_3104) {
+		/* Line2 to HP Bypass default volume, disconnect from Output Mixer */
+		snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL);
+		snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL);
+		snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL);
+		snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL);
+		/* Line2 Line Out default volume, disconnect from Output Mixer */
+		snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL);
+		snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL);
+	}
 
 	switch (aic3x->model) {
 	case AIC3X_MODEL_3X:
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 293e47a..2fbc6ef 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3760,7 +3760,7 @@
 	ret = snd_soc_register_codec(&i2c->dev,
 				     &soc_codec_dev_wm8962, &wm8962_dai, 1);
 	if (ret < 0)
-		goto err_enable;
+		goto err_pm_runtime;
 
 	regcache_cache_only(wm8962->regmap, true);
 
@@ -3769,6 +3769,8 @@
 
 	return 0;
 
+err_pm_runtime:
+	pm_runtime_disable(&i2c->dev);
 err_enable:
 	regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
@@ -3778,6 +3780,7 @@
 static int wm8962_i2c_remove(struct i2c_client *client)
 {
 	snd_soc_unregister_codec(&client->dev);
+	pm_runtime_disable(&client->dev);
 	return 0;
 }
 
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index a3e97b4..ba34252 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -131,23 +131,32 @@
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, TOR(i), 0);
+			i2s_read_reg(dev->i2s_base, TOR(i));
 	} else {
 		for (i = 0; i < 4; i++)
-			i2s_write_reg(dev->i2s_base, ROR(i), 0);
+			i2s_read_reg(dev->i2s_base, ROR(i));
 	}
 }
 
 static void i2s_start(struct dw_i2s_dev *dev,
 		      struct snd_pcm_substream *substream)
 {
-
+	u32 i, irq;
 	i2s_write_reg(dev->i2s_base, IER, 1);
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		for (i = 0; i < 4; i++) {
+			irq = i2s_read_reg(dev->i2s_base, IMR(i));
+			i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30);
+		}
 		i2s_write_reg(dev->i2s_base, ITER, 1);
-	else
+	} else {
+		for (i = 0; i < 4; i++) {
+			irq = i2s_read_reg(dev->i2s_base, IMR(i));
+			i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03);
+		}
 		i2s_write_reg(dev->i2s_base, IRER, 1);
+	}
 
 	i2s_write_reg(dev->i2s_base, CER, 1);
 }
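
Two details in the hunk above: TOR(i)/ROR(i) are read-to-clear overrun
registers, hence the switch from dummy writes to reads, and the IMR
writes unmask the per-channel interrupts. Assuming the usual
DesignWare I2S register layout:

/* IMR(i) bits: 0 RXDA (data available), 1 RXFO (RX overrun),
 *              4 TXFE (FIFO empty),     5 TXFO (TX overrun).
 * A cleared bit unmasks the source, so:
 *   irq & ~0x30  enables the two TX interrupts,
 *   irq & ~0x03  enables the two RX interrupts.
 */
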
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 48b2d24..b95132e 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -95,7 +95,8 @@
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
 		/* data on rising edge of bclk, frame low 1clk before data */
-		strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0;
+		strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI |
+			SSI_STCR_TEFS;
 		scr |= SSI_SCR_NET;
 		if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) {
 			scr &= ~SSI_I2S_MODE_MASK;
@@ -104,33 +105,31 @@
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
 		/* data on rising edge of bclk, frame high with data */
-		strcr |= SSI_STCR_TXBIT0;
+		strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
 		break;
 	case SND_SOC_DAIFMT_DSP_B:
 		/* data on rising edge of bclk, frame high with data */
-		strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0;
+		strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL;
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
 		/* data on rising edge of bclk, frame high 1clk before data */
-		strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
+		strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL |
+			SSI_STCR_TEFS;
 		break;
 	}
 
 	/* DAI clock inversion */
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
 	case SND_SOC_DAIFMT_IB_IF:
-		strcr |= SSI_STCR_TFSI;
-		strcr &= ~SSI_STCR_TSCKP;
+		strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI;
 		break;
 	case SND_SOC_DAIFMT_IB_NF:
-		strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI);
+		strcr ^= SSI_STCR_TSCKP;
 		break;
 	case SND_SOC_DAIFMT_NB_IF:
-		strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP;
+		strcr ^= SSI_STCR_TFSI;
 		break;
 	case SND_SOC_DAIFMT_NB_NF:
-		strcr &= ~SSI_STCR_TFSI;
-		strcr |= SSI_STCR_TSCKP;
 		break;
 	}
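
The inversion switch now toggles polarity relative to whatever the
format case above established, instead of forcing absolute values.
Starting from the I2S case, for instance:

/* I2S sets TSCKP | TFSI (plus TXBIT0 | TEFS), so:
 *   NB_NF: no toggle          -> TSCKP=1, TFSI=1 (format default)
 *   NB_IF: ^= TFSI            -> TSCKP=1, TFSI=0
 *   IB_IF: ^= TSCKP | TFSI    -> TSCKP=0, TFSI=0
 * XOR keeps each DAI format's own default polarity intact.
 */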
 
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index 82e350e..ac75816 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -69,7 +69,8 @@
 	struct snd_seq_oss_reg *arg;
 	struct snd_seq_device *dev;
 
-	if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
+	/* use device #1 here to avoid conflicts with OPL3 */
+	if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
 			       sizeof(struct snd_seq_oss_reg), &dev) < 0)
 		return;
 
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 2975632..c8fe6d1 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -41,6 +41,7 @@
 	libelf-getphdrnum		\
 	libelf-mmap			\
 	libnuma				\
+	numa_num_possible_cpus		\
 	libperl				\
 	libpython			\
 	libpython-version		\
@@ -51,7 +52,8 @@
 	timerfd				\
 	libdw-dwarf-unwind		\
 	zlib				\
-	lzma
+	lzma				\
+	get_cpuid
 
 FEATURE_DISPLAY ?=			\
 	dwarf				\
@@ -61,13 +63,15 @@
 	libbfd				\
 	libelf				\
 	libnuma				\
+	numa_num_possible_cpus		\
 	libperl				\
 	libpython			\
 	libslang			\
 	libunwind			\
 	libdw-dwarf-unwind		\
 	zlib				\
-	lzma
+	lzma				\
+	get_cpuid
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 74ca420..e43a297 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -19,6 +19,7 @@
 	test-libelf-getphdrnum.bin	\
 	test-libelf-mmap.bin		\
 	test-libnuma.bin		\
+	test-numa_num_possible_cpus.bin	\
 	test-libperl.bin		\
 	test-libpython.bin		\
 	test-libpython-version.bin	\
@@ -34,7 +35,8 @@
 	test-compile-x32.bin		\
 	test-zlib.bin			\
 	test-lzma.bin			\
-	test-bpf.bin
+	test-bpf.bin			\
+	test-get_cpuid.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -87,6 +89,9 @@
 test-libnuma.bin:
 	$(BUILD) -lnuma
 
+test-numa_num_possible_cpus.bin:
+	$(BUILD) -lnuma
+
 test-libunwind.bin:
 	$(BUILD) -lelf
 
@@ -162,6 +167,9 @@
 test-lzma.bin:
 	$(BUILD) -llzma
 
+test-get_cpuid.bin:
+	$(BUILD)
+
 test-bpf.bin:
 	$(BUILD)
 
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 84689a6..33cf6f2 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -77,6 +77,10 @@
 # include "test-libnuma.c"
 #undef main
 
+#define main main_test_numa_num_possible_cpus
+# include "test-numa_num_possible_cpus.c"
+#undef main
+
 #define main main_test_timerfd
 # include "test-timerfd.c"
 #undef main
@@ -117,6 +121,10 @@
 # include "test-lzma.c"
 #undef main
 
+#define main main_test_get_cpuid
+# include "test-get_cpuid.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
 	main_test_libpython();
@@ -136,6 +144,7 @@
 	main_test_libbfd();
 	main_test_backtrace();
 	main_test_libnuma();
+	main_test_numa_num_possible_cpus();
 	main_test_timerfd();
 	main_test_stackprotector_all();
 	main_test_libdw_dwarf_unwind();
@@ -143,6 +152,7 @@
 	main_test_zlib();
 	main_test_pthread_attr_setaffinity_np();
 	main_test_lzma();
+	main_test_get_cpuid();
 
 	return 0;
 }
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c
new file mode 100644
index 0000000..d7a2c40
--- /dev/null
+++ b/tools/build/feature/test-get_cpuid.c
@@ -0,0 +1,7 @@
+#include <cpuid.h>
+
+int main(void)
+{
+	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+	return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+}
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c
new file mode 100644
index 0000000..2606e94
--- /dev/null
+++ b/tools/build/feature/test-numa_num_possible_cpus.c
@@ -0,0 +1,6 @@
+#include <numa.h>
+
+int main(void)
+{
+	return numa_num_possible_cpus();
+}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 4d88593..cf42b09 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3795,7 +3795,7 @@
 	struct format_field *field;
 	struct printk_map *printk;
 	long long val, fval;
-	unsigned long addr;
+	unsigned long long addr;
 	char *str;
 	unsigned char *hex;
 	int print;
@@ -3828,13 +3828,30 @@
 		 */
 		if (!(field->flags & FIELD_IS_ARRAY) &&
 		    field->size == pevent->long_size) {
-			addr = *(unsigned long *)(data + field->offset);
+
+			/* Handle heterogeneous recording and processing
+			 * architectures
+			 *
+			 * CASE I:
+			 * Traces recorded on 32-bit devices (32-bit
+			 * addressing) and processed on 64-bit devices:
+			 * In this case, only 32 bits should be read.
+			 *
+			 * CASE II:
+			 * Traces recorded on 64-bit devices and processed
+			 * on 32-bit devices:
+			 * In this case, 64 bits must be read.
+			 */
+			addr = (pevent->long_size == 8) ?
+				*(unsigned long long *)(data + field->offset) :
+				(unsigned long long)*(unsigned int *)(data + field->offset);
+
 			/* Check if it matches a print format */
 			printk = find_printk(pevent, addr);
 			if (printk)
 				trace_seq_puts(s, printk->printk);
 			else
-				trace_seq_printf(s, "%lx", addr);
+				trace_seq_printf(s, "%llx", addr);
 			break;
 		}
 		str = malloc(len + 1);
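
A standalone illustration of the widening read above, assuming
little-endian event data recorded on a machine with 4-byte longs:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char data[8] = { 0x78, 0x56, 0x34, 0x12 };
	int long_size = 4;	/* long size of the *recording* machine */
	unsigned long long addr;

	if (long_size == 8) {
		memcpy(&addr, data, 8);
	} else {
		unsigned int a32;

		memcpy(&a32, data, 4);
		addr = a32;	/* CASE I: widen, don't over-read */
	}
	printf("%llx\n", addr);	/* prints 12345678 */
	return 0;
}
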
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index 4a0501d..c94c9de 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -364,21 +364,6 @@
 
 		CYC packets are not requested by default.
 
-no_force_psb	This is a driver option and is not in the IA32_RTIT_CTL MSR.
-
-		It stops the driver resetting the byte count to zero whenever
-		enabling the trace (for example on context switches) which in
-		turn results in no PSB being forced.  However some processors
-		will produce a PSB anyway.
-
-		In any case, there is still a PSB when the trace is enabled for
-		the first time.
-
-		no_force_psb can be used to slightly decrease the trace size but
-		may make it harder for the decoder to recover from errors.
-
-		no_force_psb is not selected by default.
-
 
 new snapshot option
 -------------------
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 827557f..38a0853 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -573,9 +573,14 @@
     msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
     NO_LIBNUMA := 1
   else
-    CFLAGS += -DHAVE_LIBNUMA_SUPPORT
-    EXTLIBS += -lnuma
-    $(call detected,CONFIG_NUMA)
+    ifeq ($(feature-numa_num_possible_cpus), 0)
+      msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8);
+      NO_LIBNUMA := 1
+    else
+      CFLAGS += -DHAVE_LIBNUMA_SUPPORT
+      EXTLIBS += -lnuma
+      $(call detected,CONFIG_NUMA)
+    endif
   endif
 endif
 
@@ -621,8 +626,13 @@
 endif
 
 ifndef NO_AUXTRACE
-  $(call detected,CONFIG_AUXTRACE)
-  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  ifeq ($(feature-get_cpuid), 0)
+    msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
+    NO_AUXTRACE := 1
+  else
+    $(call detected,CONFIG_AUXTRACE)
+    CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  endif
 endif
 
 # Among the variables below, these:
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 349bc96..e5f18a2 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -17,6 +17,7 @@
 libperf-y += llvm-utils.o
 libperf-y += parse-options.o
 libperf-y += parse-events.o
+libperf-y += perf_regs.o
 libperf-y += path.o
 libperf-y += rbtree.o
 libperf-y += bitmap.o
@@ -103,7 +104,6 @@
 
 libperf-y += scripting-engines/
 
-libperf-$(CONFIG_PERF_REGS) += perf_regs.o
 libperf-$(CONFIG_ZLIB) += zlib.o
 libperf-$(CONFIG_LZMA) += lzma.o
 
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 885e8ac..6b8eb13 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -6,6 +6,7 @@
 	SMPL_REG_END
 };
 
+#ifdef HAVE_PERF_REGS_SUPPORT
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
 	int i, idx = 0;
@@ -29,3 +30,4 @@
 	*valp = regs->cache_regs[id];
 	return 0;
 }
+#endif
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 2984dcc..679d6e4 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -2,6 +2,7 @@
 #define __PERF_REGS_H
 
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 struct regs_dump;
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index eb5f18b..c6f9af7 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -270,12 +270,13 @@
 	int ret = 0;
 
 	if (module) {
-		list_for_each_entry(dso, &host_machine->dsos.head, node) {
-			if (!dso->kernel)
-				continue;
-			if (strncmp(dso->short_name + 1, module,
-				    dso->short_name_len - 2) == 0)
-				goto found;
+		char module_name[128];
+
+		snprintf(module_name, sizeof(module_name), "[%s]", module);
+		map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
+		if (map) {
+			dso = map->dso;
+			goto found;
 		}
 		pr_debug("Failed to find module %s.\n", module);
 		return -ENOENT;
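
The new lookup keys off perf's convention of registering kernel-module
maps under the bracketed short name. A trivial sketch ("ext4" is just
a placeholder):

#include <stdio.h>

int main(void)
{
	char module_name[128];

	snprintf(module_name, sizeof(module_name), "[%s]", "ext4");
	puts(module_name);	/* "[ext4]" - the key the hunk builds */
	return 0;
}
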
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8a4537e..fc3f7c9 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1580,7 +1580,10 @@
 	file_offset = page_offset;
 	head = data_offset - page_offset;
 
-	if (data_size && (data_offset + data_size < file_size))
+	if (data_size == 0)
+		goto out;
+
+	if (data_offset + data_size < file_size)
 		file_size = data_offset + data_size;
 
 	ui_progress__init(&prog, file_size, "Processing events...");
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 415c359..2d065d0 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -196,7 +196,8 @@
 		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
 }
 
-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+static int check_per_pkg(struct perf_evsel *counter,
+			 struct perf_counts_values *vals, int cpu, bool *skip)
 {
 	unsigned long *mask = counter->per_pkg_mask;
 	struct cpu_map *cpus = perf_evsel__cpus(counter);
@@ -218,6 +219,17 @@
 		counter->per_pkg_mask = mask;
 	}
 
+	/*
+	 * We do not consider an event that has not run to be a good
+	 * instance for marking a package as used (skip=1). Otherwise
+	 * we may run into a situation where the first CPU in a package
+	 * is not running anything, yet the second is, and this function
+	 * would mark the package as used after the first CPU and would
+	 * not read the values from the second CPU.
+	 */
+	if (!(vals->run && vals->ena))
+		return 0;
+
 	s = cpu_map__get_socket(cpus, cpu);
 	if (s < 0)
 		return -1;
@@ -235,7 +247,7 @@
 	static struct perf_counts_values zero;
 	bool skip = false;
 
-	if (check_per_pkg(evsel, cpu, &skip)) {
+	if (check_per_pkg(evsel, count, cpu, &skip)) {
 		pr_err("failed to read per-pkg counter\n");
 		return -1;
 	}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 53bb5f5..475d88d 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -38,7 +38,7 @@
 #endif
 
 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-int elf_getphdrnum(Elf *elf, size_t *dst)
+static int elf_getphdrnum(Elf *elf, size_t *dst)
 {
 	GElf_Ehdr gehdr;
 	GElf_Ehdr *ehdr;
@@ -1271,8 +1271,6 @@
 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
 		       bool temp)
 {
-	GElf_Ehdr *ehdr;
-
 	kcore->elfclass = elfclass;
 
 	if (temp)
@@ -1289,9 +1287,7 @@
 	if (!gelf_newehdr(kcore->elf, elfclass))
 		goto out_end;
 
-	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
-	if (!ehdr)
-		goto out_end;
+	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
 
 	return 0;
 
@@ -1348,23 +1344,18 @@
 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
 			   u64 addr, u64 len)
 {
-	GElf_Phdr gphdr;
-	GElf_Phdr *phdr;
+	GElf_Phdr phdr = {
+		.p_type		= PT_LOAD,
+		.p_flags	= PF_R | PF_W | PF_X,
+		.p_offset	= offset,
+		.p_vaddr	= addr,
+		.p_paddr	= 0,
+		.p_filesz	= len,
+		.p_memsz	= len,
+		.p_align	= page_size,
+	};
 
-	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
-	if (!phdr)
-		return -1;
-
-	phdr->p_type	= PT_LOAD;
-	phdr->p_flags	= PF_R | PF_W | PF_X;
-	phdr->p_offset	= offset;
-	phdr->p_vaddr	= addr;
-	phdr->p_paddr	= 0;
-	phdr->p_filesz	= len;
-	phdr->p_memsz	= len;
-	phdr->p_align	= page_size;
-
-	if (!gelf_update_phdr(kcore->elf, idx, phdr))
+	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
 		return -1;
 
 	return 0;
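
The rewrite relies on a C guarantee: a designated initializer zeroes
every member it does not name, which is why the gelf_getphdr()
read-back could be dropped. In miniature:

struct phdr_like { int p_type, p_flags, p_paddr; };

struct phdr_like p = { .p_type = 1 };	/* p_flags and p_paddr are 0 */
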
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 7acafb3..c2cd9bf2 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -709,7 +709,7 @@
 
 	dir = opendir(procfs__mountpoint());
 	if (!dir)
-		return -1;
+		return false;
 
 	/* Walk through the directory. */
 	while (ret && (d = readdir(dir)) != NULL) {
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9655cb4..bde0ef1 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -71,8 +71,11 @@
 unsigned int extra_msr_offset64;
 unsigned int extra_delta_offset32;
 unsigned int extra_delta_offset64;
+unsigned int aperf_mperf_multiplier = 1;
 int do_smi;
 double bclk;
+double base_hz;
+double tsc_tweak = 1.0;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
@@ -502,7 +505,7 @@
 	/* %Busy */
 	if (has_aperf) {
 		if (!skip_c0)
-			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
+			outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
 		else
 			outp += sprintf(outp, "********");
 	}
@@ -510,7 +513,7 @@
 	/* Bzy_MHz */
 	if (has_aperf)
 		outp += sprintf(outp, "%8.0f",
-			1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
+			1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);
 
 	/* TSC_MHz */
 	outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@
 			return -3;
 		if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
 			return -4;
+		t->aperf = t->aperf * aperf_mperf_multiplier;
+		t->mperf = t->mperf * aperf_mperf_multiplier;
 	}
 
 	if (do_smi) {
@@ -1149,6 +1154,19 @@
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
+
+static void
+calculate_tsc_tweak()
+{
+	unsigned long long msr;
+	unsigned int base_ratio;
+
+	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	base_ratio = (msr >> 8) & 0xFF;
+	base_hz = base_ratio * bclk * 1000000;
+	tsc_tweak = base_hz / tsc_hz;
+}
+
 static void
 dump_nhm_platform_info(void)
 {
@@ -1926,8 +1944,6 @@
 
 	switch (model) {
 	case 0x3A:	/* IVB */
-	case 0x3E:	/* IVB Xeon */
-
 	case 0x3C:	/* HSW */
 	case 0x3F:	/* HSX */
 	case 0x45:	/* HSW */
@@ -2543,6 +2559,13 @@
 	return 0;
 }
 
+unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
+{
+	if (is_knl(family, model))
+		return 1024;
+	return 1;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2744,6 +2767,9 @@
 		}
 	}
 
+	if (has_aperf)
+		aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
+
 	do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
 	do_snb_cstates = has_snb_msrs(family, model);
 	do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@
 	if (debug)
 		dump_cstate_pstate_config_info();
 
+	if (has_skl_msrs(family, model))
+		calculate_tsc_tweak();
+
 	return;
 }
 
@@ -3090,7 +3119,7 @@
 }
 
 void print_version() {
-	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
+	fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
index d1b6475..6cae061 100644
--- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -25,10 +25,19 @@
 
 #define FIXUP_SECTION ".ex_fixup"
 
+static inline unsigned long __fls(unsigned long x);
+
 #include "word-at-a-time.h"
 
 #include "utils.h"
 
+static inline unsigned long __fls(unsigned long x)
+{
+	int lz;
+
+	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
+	return sizeof(unsigned long) - 1 - lz;
+}
 
 static int page_size;
 static char *mem_region;